From 386cca47977e0f933e90a78ec6bab44963df652f Mon Sep 17 00:00:00 2001 From: Liao Shihua Date: Fri, 10 May 2024 16:12:35 +0800 Subject: [PATCH 01/10] Mininal Support XSFVQMACC --- gcc/common/config/riscv/riscv-common.cc | 4 ++++ gcc/config/riscv/riscv.opt | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc index 60595a3e3561..13af96aa52b2 100644 --- a/gcc/common/config/riscv/riscv-common.cc +++ b/gcc/common/config/riscv/riscv-common.cc @@ -429,6 +429,8 @@ static const struct riscv_ext_version riscv_ext_version_table[] = {"xsfvcp", ISA_SPEC_CLASS_NONE, 1, 0}, {"xsfcease", ISA_SPEC_CLASS_NONE, 1, 0}, + {"xsfvqmaccqoq", ISA_SPEC_CLASS_NONE, 1, 0}, + {"xsfvqmaccdod", ISA_SPEC_CLASS_NONE, 1, 0}, /* Terminate the list. */ {NULL, ISA_SPEC_CLASS_NONE, 0, 0} @@ -1758,6 +1760,8 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] = RISCV_EXT_FLAG_ENTRY ("xsfvcp", x_riscv_sifive_subext, MASK_XSFVCP), RISCV_EXT_FLAG_ENTRY ("xsfcease", x_riscv_sifive_subext, MASK_XSFCEASE), + RISCV_EXT_FLAG_ENTRY ("xsfvqmaccqoq", x_riscv_sifive_subext, MASK_XSFVQMACCQOQ), + RISCV_EXT_FLAG_ENTRY ("xsfvqmaccdod", x_riscv_sifive_subext, MASK_XSFVQMACCDOD), {NULL, NULL, NULL, 0} }; diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt index 5bc5d3002934..6f1d7c1b90c4 100644 --- a/gcc/config/riscv/riscv.opt +++ b/gcc/config/riscv/riscv.opt @@ -523,6 +523,10 @@ Mask(XSFVCP) Var(riscv_sifive_subext) Mask(XSFCEASE) Var(riscv_sifive_subext) +Mask(XSFVQMACCDOD) Var(riscv_sifive_subext) + +Mask(XSFVQMACCQOQ) Var(riscv_sifive_subext) + Enum Name(isa_spec_class) Type(enum riscv_isa_spec_class) Supported ISA specs (for use with the -misa-spec= option): From 068706a51a6d98fb83c5e09fd8de3542f4c008eb Mon Sep 17 00:00:00 2001 From: yulong Date: Sun, 29 Sep 2024 11:27:43 +0800 Subject: [PATCH 02/10] RISC-V: Add Xsfvqmacc extension intrinsics support. 
--- gcc/config/riscv/riscv-c.cc | 4 +- .../riscv/riscv-vector-builtins-bases.cc | 96 ++++++++++++ .../riscv/riscv-vector-builtins-bases.h | 4 + .../riscv/riscv-vector-builtins-shapes.cc | 34 +++++ .../riscv/riscv-vector-builtins-shapes.h | 1 + gcc/config/riscv/riscv-vector-builtins.cc | 102 +++++++++++++ gcc/config/riscv/riscv-vector-builtins.def | 4 + gcc/config/riscv/riscv-vector-builtins.h | 14 ++ gcc/config/riscv/riscv.md | 3 +- .../sifive-vector-builtins-functions.def | 19 +++ gcc/config/riscv/sifive-vector.md | 143 ++++++++++++++++++ gcc/config/riscv/t-riscv | 1 + gcc/config/riscv/vector-iterators.md | 18 +++ gcc/config/riscv/vector.md | 1 + 14 files changed, 442 insertions(+), 2 deletions(-) create mode 100644 gcc/config/riscv/sifive-vector-builtins-functions.def create mode 100644 gcc/config/riscv/sifive-vector.md diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc index c59f408d3a8e..4258b22d40b4 100644 --- a/gcc/config/riscv/riscv-c.cc +++ b/gcc/config/riscv/riscv-c.cc @@ -220,6 +220,7 @@ riscv_cpu_cpp_builtins (cpp_reader *pfile) builtin_define_with_int_value ("__riscv_th_v_intrinsic", riscv_ext_version_value (0, 11)); + /* Define architecture extension test macros. */ builtin_define_with_int_value ("__riscv_arch_test", 1); @@ -270,7 +271,8 @@ riscv_pragma_intrinsic (cpp_reader *) const char *name = TREE_STRING_POINTER (x); if (strcmp (name, "vector") == 0 - || strcmp (name, "xtheadvector") == 0) + || strcmp (name, "xtheadvector") == 0 + || strcmp (name, "sifive_vector") == 0) { struct pragma_intrinsic_flags backup_flags; diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index b8c337f4e77a..6fe56f9d105a 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -869,6 +869,94 @@ class vwmaccus : public function_base } }; +/* Implements vqmacc. 
*/ +class vqmacc : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plus_dod (SIGN_EXTEND, e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements vqmaccu. */ +class vqmaccu : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plus_dod (ZERO_EXTEND, e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements vqmaccsu. */ +class vqmaccsu : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plussu_qoq (e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plussu_dod (e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements vqmaccus. 
*/ +class vqmaccus : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plusus_qoq (e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_quad_mul_plusus_dod (e.vector_mode ())); + gcc_unreachable (); + } +}; + /* Implements vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor */ template class mask_logic : public function_base @@ -2762,6 +2850,10 @@ static CONSTEXPR const th_loadstore_width vs static CONSTEXPR const th_loadstore_width vsuxh_obj; static CONSTEXPR const th_loadstore_width vsuxw_obj; static CONSTEXPR const th_extract vext_x_v_obj; +static CONSTEXPR const vqmacc vqmacc_obj; +static CONSTEXPR const vqmaccu vqmaccu_obj; +static CONSTEXPR const vqmaccsu vqmaccsu_obj; +static CONSTEXPR const vqmaccus vqmaccus_obj; /* Crypto Vector */ static CONSTEXPR const vandn vandn_obj; @@ -3092,6 +3184,10 @@ BASE (vsuxb) BASE (vsuxh) BASE (vsuxw) BASE (vext_x_v) +BASE (vqmacc) +BASE (vqmaccu) +BASE (vqmaccsu) +BASE (vqmaccus) /* Crypto vector */ BASE (vandn) BASE (vbrev) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h index af1cb1af50f0..9b9e795c6efa 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.h +++ b/gcc/config/riscv/riscv-vector-builtins-bases.h @@ -311,6 +311,10 @@ extern const function_base *const vsuxb; extern const function_base *const vsuxh; extern const function_base *const vsuxw; extern const function_base *const vext_x_v; +extern const function_base *const vqmacc; +extern const function_base *const vqmaccu; +extern const function_base *const vqmaccsu; +extern const function_base *const vqmaccus; /* Below function_base are 
Vectro Crypto*/ extern const function_base *const vandn; extern const function_base *const vbrev; diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index 22cbbc215954..b4c43d387153 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1287,6 +1287,39 @@ struct crypto_vv_no_op_type_def : public build_base } }; +/* sf_vqmacc_def class. */ +struct sf_vqmacc_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + /* Return nullptr if it can not be overloaded. */ + if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) + return nullptr; + + b.append_name ("__riscv_sf_"); + b.append_name (instance.base_name); + + /* vop_v --> vop_v_. */ + if (!overloaded_p) + { + /* vop --> vop_v. */ + b.append_name (operand_suffixes[instance.op_info->op]); + /* vop_v --> vop_v_. 
*/ + b.append_name (type_suffixes[instance.type.index].vector); + } + + /* According to SIFIVE vector-intrinsic-doc, it does not add "_m" suffix + for vop_m C++ overloaded API.*/ + if (overloaded_p && instance.pred == PRED_TYPE_tu) + { + b.append_name (predication_suffixes[instance.pred]); + } + + return b.finish_name (); + } +}; + SHAPE(vsetvl, vsetvl) SHAPE(vsetvl, vsetvlmax) SHAPE(loadstore, loadstore) @@ -1321,4 +1354,5 @@ SHAPE(seg_fault_load, seg_fault_load) SHAPE(crypto_vv, crypto_vv) SHAPE(crypto_vi, crypto_vi) SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type) +SHAPE(sf_vqmacc,sf_vqmacc) } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h index 3de837c158e0..0a3473cfa23f 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.h +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h @@ -59,6 +59,7 @@ extern const function_shape *const seg_fault_load; extern const function_shape *const crypto_vv; extern const function_shape *const crypto_vi; extern const function_shape *const crypto_vv_no_op_type; +extern const function_shape *const sf_vqmacc; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index 458d9b0886e3..ffaec36192d5 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ b/gcc/config/riscv/riscv-vector-builtins.cc @@ -855,6 +855,29 @@ static CONSTEXPR const rvv_arg_type_info us_wwxv_args[] rvv_arg_type_info (RVV_BASE_double_trunc_vector), rvv_arg_type_info_end}; +/* A static operand information for vector_type func (vector_type, quad demote + * type, quad demote type) function registration. 
*/ +static CONSTEXPR const rvv_arg_type_info qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_vector), rvv_arg_type_info_end}; + +/* A list of args for vector_type func (vector_type, quad demote type, quad + * demote type) function. */ +static CONSTEXPR const rvv_arg_type_info su_qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_unsigned_vector), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (vector_type, quad demote type, quad + * demote type) function. */ +static CONSTEXPR const rvv_arg_type_info us_qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_unsigned_vector), + rvv_arg_type_info (RVV_BASE_quad_trunc_vector), + rvv_arg_type_info_end}; + /* A list of args for vector_type func (signed double demote type, * unsigneddouble demote type) function. */ static CONSTEXPR const rvv_arg_type_info su_wvv_args[] @@ -1743,6 +1766,18 @@ static CONSTEXPR const rvv_op_info u_to_nf_xu_w_ops rvv_arg_type_info (RVV_BASE_double_trunc_float_vector), /* Return type */ v_args /* Args */}; +static CONSTEXPR const rvv_op_info u_to_nf_4x8x4_ops + = {wconvert_u_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vlmul_ext_x8), /* Return type */ + v_args /* Args */}; + +static CONSTEXPR const rvv_op_info u_to_nf_2x8x2_ops + = {wconvert_u_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vlmul_ext_x8), /* Return type */ + v_args /* Args */}; + /* A static operand information for vector_type func (vector_type) * function registration. 
*/ static CONSTEXPR const rvv_op_info f_to_u_f_v_ops @@ -2279,6 +2314,70 @@ static CONSTEXPR const rvv_op_info i_us_wwxv_ops rvv_arg_type_info (RVV_BASE_vector), /* Return type */ us_wwxv_args /* Args */}; +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_qqvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info u_qqvv_ops + = {qextu_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_su_qqvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + su_qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_us_qqvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + us_qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_qdvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. 
*/ +static CONSTEXPR const rvv_op_info u_qdvv_ops + = {qextu_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_su_qdvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + su_qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, double demote + * type, double demote type) function registration. */ +static CONSTEXPR const rvv_op_info i_us_qdvv_ops + = {qexti_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + us_qqvv_args /* Args */}; + /* A static operand information for vector_type func (signed double demote type, * unsigned double demote type) function registration. */ static CONSTEXPR const rvv_op_info i_su_wvv_ops @@ -2950,6 +3049,9 @@ static function_group_info function_groups[] = { #define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \ {#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS}, #include "thead-vector-builtins-functions.def" +#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \ + {#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS}, +#include "sifive-vector-builtins-functions.def" }; /* The RVV types, with their built-in diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index ffa14d46dbc8..89381d7b9ec0 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -634,6 +634,8 @@ DEF_RVV_OP_TYPE (xu_v) DEF_RVV_OP_TYPE (f_w) DEF_RVV_OP_TYPE (xu_w) DEF_RVV_OP_TYPE (s) +DEF_RVV_OP_TYPE (4x8x4) +DEF_RVV_OP_TYPE (2x8x2) DEF_RVV_PRED_TYPE (ta) DEF_RVV_PRED_TYPE (tu) @@ -714,6 +716,8 @@ 
DEF_RVV_BASE_TYPE (vlmul_ext_x32, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x64, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node)) DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_trunc_signed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_trunc_unsigned_vector, get_vector_type (type_idx)) DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU) DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE) diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h index f092dbfa3bef..fec024d9f948 100644 --- a/gcc/config/riscv/riscv-vector-builtins.h +++ b/gcc/config/riscv/riscv-vector-builtins.h @@ -127,6 +127,8 @@ enum required_ext XTHEADVECTOR_EXT, /* XTheadVector extension */ ZVFBFMIN_EXT, /* Zvfbfmin extension */ ZVFBFWMA_EXT, /* Zvfbfwma extension */ + XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */ + XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */ /* Please update below to isa_name func when add or remove enum type(s). 
*/ }; @@ -160,6 +162,10 @@ static inline const char * required_ext_to_isa_name (enum required_ext required) return "zvfbfmin"; case ZVFBFWMA_EXT: return "zvfbfwma"; + case XSFVQMACCQOQ_EXT: + return "xsfvqmaccqoq"; + case XSFVQMACCDOD_EXT: + return "xsfvqmaccdod"; default: gcc_unreachable (); } @@ -197,6 +203,10 @@ static inline bool required_extensions_specified (enum required_ext required) return TARGET_ZVFBFMIN; case ZVFBFWMA_EXT: return TARGET_ZVFBFWMA; + case XSFVQMACCQOQ_EXT: + return TARGET_XSFVQMACCQOQ; + case XSFVQMACCDOD_EXT: + return TARGET_XSFVQMACCDOD; default: gcc_unreachable (); } @@ -337,6 +347,10 @@ struct function_group_info return TARGET_ZVFBFMIN; case ZVFBFWMA_EXT: return TARGET_ZVFBFWMA; + case XSFVQMACCQOQ_EXT: + return TARGET_XSFVQMACCQOQ; + case XSFVQMACCDOD_EXT: + return TARGET_XSFVQMACCDOD; default: gcc_unreachable (); } diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md index 5b7b73535a3a..0eb23e3a349a 100644 --- a/gcc/config/riscv/riscv.md +++ b/gcc/config/riscv/riscv.md @@ -381,6 +381,7 @@ ;; vidiv vector single-width integer divide instructions ;; viwmul vector widening integer multiply instructions ;; vimuladd vector single-width integer multiply-add instructions +;; vsfmuladd vector matrix integer multiply-add instructions ;; viwmuladd vector widening integer multiply-add instructions ;; vimerge vector integer merge instructions ;; vimov vector integer move vector instructions @@ -485,7 +486,7 @@ vldux,vldox,vstux,vstox,vldff,vldr,vstr, vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff, vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax, - vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov, + vimul,vidiv,viwmul,vimuladd,vsfmuladd,viwmuladd,vimerge,vimov, vsalu,vaalu,vsmul,vsshift,vnclip, vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp, vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov, diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def 
b/gcc/config/riscv/sifive-vector-builtins-functions.def new file mode 100644 index 000000000000..b3ef445dba78 --- /dev/null +++ b/gcc/config/riscv/sifive-vector-builtins-functions.def @@ -0,0 +1,19 @@ +#ifndef DEF_RVV_FUNCTION +#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) +#endif + +#define REQUIRED_EXTENSIONS XSFVQMACCQOQ_EXT +DEF_RVV_FUNCTION (vqmaccu, sf_vqmacc, none_tu_preds, u_qqvv_ops) +DEF_RVV_FUNCTION (vqmacc, sf_vqmacc, none_tu_preds, i_qqvv_ops) +DEF_RVV_FUNCTION (vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qqvv_ops) +DEF_RVV_FUNCTION (vqmaccus, sf_vqmacc, none_tu_preds, i_us_qqvv_ops) +#undef REQUIRED_EXTENSIONS + +#define REQUIRED_EXTENSIONS XSFVQMACCDOD_EXT +DEF_RVV_FUNCTION (vqmaccu, sf_vqmacc, none_tu_preds, u_qdvv_ops) +DEF_RVV_FUNCTION (vqmacc, sf_vqmacc, none_tu_preds, i_qdvv_ops) +DEF_RVV_FUNCTION (vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qdvv_ops) +DEF_RVV_FUNCTION (vqmaccus, sf_vqmacc, none_tu_preds, i_us_qdvv_ops) +#undef REQUIRED_EXTENSIONS + +#undef DEF_RVV_FUNCTION diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md new file mode 100644 index 000000000000..01ffa6f13f78 --- /dev/null +++ b/gcc/config/riscv/sifive-vector.md @@ -0,0 +1,143 @@ +(define_insn "@pred_quad_mul_plus_qoq" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (any_extend:V_SF + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (any_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmacc.4x8x4\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + 
(set_attr "mode" "")]) + +(define_insn "@pred_quad_mul_plussu_qoq" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (sign_extend:V_SF + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (zero_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmaccsu.4x8x4\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + (set_attr "mode" "")]) + +(define_insn "@pred_quad_mul_plusus_qoq" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (zero_extend:V_SF + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (sign_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmaccus.4x8x4\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + (set_attr "mode" "")]) + +(define_insn "@pred_quad_mul_plus_dod" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + 
(reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (any_extend:V_SF + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (any_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmacc.2x8x2\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + (set_attr "mode" "")]) + +(define_insn "@pred_quad_mul_plussu_dod" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (sign_extend:V_SF + (match_operand: 3 "register_operand" " vr")) + (zero_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmaccsu.2x8x2\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + (set_attr "mode" "")]) + +(define_insn "@pred_quad_mul_plusus_dod" + [(set (match_operand:V_SF 0 "register_operand" "=&vr") + (if_then_else:V_SF + (unspec: + [(match_operand: 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:V_SF + (mult:V_SF + (zero_extend:V_SF + (match_operand: 3 "register_operand" " vr")) + (sign_extend:V_SF + (match_operand: 4 "register_operand" " vr"))) + (match_operand:V_SF 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmaccus.2x8x2\t%0,%3,%4" + [(set_attr "type" "vsfmuladd") + 
(set_attr "mode" "")]) diff --git a/gcc/config/riscv/t-riscv b/gcc/config/riscv/t-riscv index 38494320d8b2..c076fee84b0f 100644 --- a/gcc/config/riscv/t-riscv +++ b/gcc/config/riscv/t-riscv @@ -2,6 +2,7 @@ RISCV_BUILTINS_H = $(srcdir)/config/riscv/riscv-vector-builtins.h \ $(srcdir)/config/riscv/riscv-vector-builtins.def \ $(srcdir)/config/riscv/riscv-vector-builtins-functions.def \ $(srcdir)/config/riscv/thead-vector-builtins-functions.def \ + $(srcdir)/config/riscv/sifive-vector-builtins-functions.def \ riscv-vector-type-indexer.gen.def riscv-builtins.o: $(srcdir)/config/riscv/riscv-builtins.cc $(CONFIG_H) \ diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 43325d1ba87a..0de750e01120 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -4512,3 +4512,21 @@ (V256DF "v64df") (V512DF "v128df") ]) + +(define_mode_iterator V_SF [ + RVVM8SI RVVM4SI RVVM2SI RVVM1SI +]) + +(define_mode_attr V_SF_QMACC [ + (RVVM8SI "RVVM4QI") + (RVVM4SI "RVVM2QI") + (RVVM2SI "RVVM1QI") + (RVVM1SI "RVVMF2QI") +]) + +(define_mode_attr v_sf_qmacc [ + (RVVM8SI "rvvm4qi") + (RVVM4SI "rvvm2qi") + (RVVM2SI "rvvm1qi") + (RVVM1SI "rvvmf2qi") +]) diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md index a21288f7af2a..88b437e62ee9 100644 --- a/gcc/config/riscv/vector.md +++ b/gcc/config/riscv/vector.md @@ -8531,3 +8531,4 @@ (include "autovec.md") (include "autovec-opt.md") +(include "sifive-vector.md") From 5a1a98805375933788743dd3f05f77b193407b3b Mon Sep 17 00:00:00 2001 From: yulong Date: Tue, 8 Oct 2024 17:28:06 +0800 Subject: [PATCH 03/10] Drop blank lines --- gcc/config/riscv/riscv-c.cc | 1 - gcc/config/riscv/riscv-vector-builtins-shapes.cc | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc index 4258b22d40b4..4ffe4c0ca06c 100644 --- a/gcc/config/riscv/riscv-c.cc +++ b/gcc/config/riscv/riscv-c.cc @@ -220,7 +220,6 @@ 
riscv_cpu_cpp_builtins (cpp_reader *pfile) builtin_define_with_int_value ("__riscv_th_v_intrinsic", riscv_ext_version_value (0, 11)); - /* Define architecture extension test macros. */ builtin_define_with_int_value ("__riscv_arch_test", 1); diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index b4c43d387153..6d1e31e7bab8 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1354,5 +1354,5 @@ SHAPE(seg_fault_load, seg_fault_load) SHAPE(crypto_vv, crypto_vv) SHAPE(crypto_vi, crypto_vi) SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type) -SHAPE(sf_vqmacc,sf_vqmacc) +SHAPE(sf_vqmacc, sf_vqmacc) } // end namespace riscv_vector From 8f9646a6fc8adddbdf7d7bd8c8b0502163bb1610 Mon Sep 17 00:00:00 2001 From: yulong Date: Tue, 15 Oct 2024 15:16:04 +0800 Subject: [PATCH 04/10] Add mininal support for XSFVFNRCLIPXFQF extension --- gcc/common/config/riscv/riscv-common.cc | 10 ++++++---- gcc/config/riscv/riscv.opt | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc index 13af96aa52b2..fd638cc5db09 100644 --- a/gcc/common/config/riscv/riscv-common.cc +++ b/gcc/common/config/riscv/riscv-common.cc @@ -431,6 +431,7 @@ static const struct riscv_ext_version riscv_ext_version_table[] = {"xsfcease", ISA_SPEC_CLASS_NONE, 1, 0}, {"xsfvqmaccqoq", ISA_SPEC_CLASS_NONE, 1, 0}, {"xsfvqmaccdod", ISA_SPEC_CLASS_NONE, 1, 0}, + {"xsfvfnrclipxfqf", ISA_SPEC_CLASS_NONE, 1, 0}, /* Terminate the list. 
*/ {NULL, ISA_SPEC_CLASS_NONE, 0, 0} @@ -1758,10 +1759,11 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] = RISCV_EXT_FLAG_ENTRY ("xventanacondops", x_riscv_xventana_subext, MASK_XVENTANACONDOPS), - RISCV_EXT_FLAG_ENTRY ("xsfvcp", x_riscv_sifive_subext, MASK_XSFVCP), - RISCV_EXT_FLAG_ENTRY ("xsfcease", x_riscv_sifive_subext, MASK_XSFCEASE), - RISCV_EXT_FLAG_ENTRY ("xsfvqmaccqoq", x_riscv_sifive_subext, MASK_XSFVQMACCQOQ), - RISCV_EXT_FLAG_ENTRY ("xsfvqmaccdod", x_riscv_sifive_subext, MASK_XSFVQMACCDOD), + RISCV_EXT_FLAG_ENTRY ("xsfvcp", x_riscv_sifive_subext, MASK_XSFVCP), + RISCV_EXT_FLAG_ENTRY ("xsfcease", x_riscv_sifive_subext, MASK_XSFCEASE), + RISCV_EXT_FLAG_ENTRY ("xsfvqmaccqoq", x_riscv_sifive_subext, MASK_XSFVQMACCQOQ), + RISCV_EXT_FLAG_ENTRY ("xsfvqmaccdod", x_riscv_sifive_subext, MASK_XSFVQMACCDOD), + RISCV_EXT_FLAG_ENTRY ("xsfvfnrclipxfqf", x_riscv_sifive_subext, MASK_XSFVFNRCLIPXFQF), {NULL, NULL, NULL, 0} }; diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt index 6f1d7c1b90c4..628941f39e76 100644 --- a/gcc/config/riscv/riscv.opt +++ b/gcc/config/riscv/riscv.opt @@ -523,9 +523,11 @@ Mask(XSFVCP) Var(riscv_sifive_subext) Mask(XSFCEASE) Var(riscv_sifive_subext) -Mask(XSFVQMACCDOD) Var(riscv_sifive_subext) +Mask(XSFVQMACCDOD) Var(riscv_sifive_subext) -Mask(XSFVQMACCQOQ) Var(riscv_sifive_subext) +Mask(XSFVQMACCQOQ) Var(riscv_sifive_subext) + +Mask(XSFVFNRCLIPXFQF) Var(riscv_sifive_subext) Enum Name(isa_spec_class) Type(enum riscv_isa_spec_class) From e71de7e0d6b59ef0f0b64fd368b08e7b4986cd2a Mon Sep 17 00:00:00 2001 From: yulong Date: Fri, 25 Oct 2024 09:32:15 +0800 Subject: [PATCH 05/10] Add intrinsic support for XSFVFNRCLIPXFQF extension --- .../riscv/riscv-vector-builtins-bases.cc | 29 ++++++++++++ .../riscv/riscv-vector-builtins-bases.h | 2 + .../riscv/riscv-vector-builtins-shapes.cc | 38 +++++++++++++++ .../riscv/riscv-vector-builtins-shapes.h | 1 + gcc/config/riscv/riscv-vector-builtins.cc | 21 +++++++++ 
gcc/config/riscv/riscv-vector-builtins.def | 1 + gcc/config/riscv/riscv-vector-builtins.h | 18 +++++++ gcc/config/riscv/riscv.md | 3 +- .../sifive-vector-builtins-functions.def | 6 +++ gcc/config/riscv/sifive-vector.md | 47 +++++++++++++++++++ 10 files changed, 165 insertions(+), 1 deletion(-) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index 6fe56f9d105a..51337e5a3754 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -705,6 +705,32 @@ class vnclip : public function_base } }; +/* Implements vfnrclip. */ +class vfnrclip : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool may_require_qfrm_p () const override { return true; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + if (pred == PRED_TYPE_mu || pred == PRED_TYPE_tu || pred == PRED_TYPE_tumu) + { + return true; + } + return false; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_f_qf) + { + return e.use_exact_insn ( + code_for_pred_fnr_clip (ZERO_EXTEND, e.vector_mode ())); + } + gcc_unreachable (); + } +}; + /* Implements vmseq/vmsne/vmslt/vmsgt/vmsle/vmsge. 
*/ template class icmp : public function_base @@ -2661,6 +2686,8 @@ static CONSTEXPR const sat_op vssrl_obj; static CONSTEXPR const sat_op vssra_obj; static CONSTEXPR const vnclip vnclip_obj; static CONSTEXPR const vnclip vnclipu_obj; +static CONSTEXPR const vfnrclip x_obj; +static CONSTEXPR const vfnrclip xu_obj; static CONSTEXPR const mask_logic vmand_obj; static CONSTEXPR const mask_nlogic vmnand_obj; static CONSTEXPR const mask_notlogic vmandn_obj; @@ -2995,6 +3022,8 @@ BASE (vssra) BASE (vssrl) BASE (vnclip) BASE (vnclipu) +BASE (x) +BASE (xu) BASE (vmand) BASE (vmnand) BASE (vmandn) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h index 9b9e795c6efa..a2e6b0b35ec6 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.h +++ b/gcc/config/riscv/riscv-vector-builtins-bases.h @@ -122,6 +122,8 @@ extern const function_base *const vssra; extern const function_base *const vssrl; extern const function_base *const vnclip; extern const function_base *const vnclipu; +extern const function_base *const x; +extern const function_base *const xu; extern const function_base *const vmand; extern const function_base *const vmnand; extern const function_base *const vmandn; diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index 6d1e31e7bab8..3fe7ba4898d1 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1320,6 +1320,43 @@ struct sf_vqmacc_def : public build_base } }; +/* sf_vfnrclip_def class. Handle instructions like vfnrclip. */ +struct sf_vfnrclip_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + /* Return nullptr if it can not be overloaded. 
*/ + if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) + return nullptr; + + b.append_name ("__riscv_sf_vfnrclip_"); + printf("aaaaaa %s\n", instance.base_name); + b.append_name (instance.base_name); + + if (!overloaded_p) + { + /* vop --> vop_. */ + b.append_name (operand_suffixes[instance.op_info->op]); + /* vop_v --> vop_v_. */ + b.append_name (type_suffixes[instance.type.index].vector); + /* vop_ --> vop__. + vector_type_index ret_type_idx + = instance.op_info->ret.get_function_type_index (instance.type.index); + b.append_name (type_suffixes[ret_type_idx].vector); */ + } + + /* According to rvv-intrinsic-doc, it does not add "_m" suffix + for vop_m C++ overloaded API.*/ + if (overloaded_p && (instance.pred == PRED_TYPE_tu || instance.pred == PRED_TYPE_mu || + instance.pred == PRED_TYPE_tumu)) + { + b.append_name (predication_suffixes[instance.pred]); + } + return b.finish_name (); + } +}; + SHAPE(vsetvl, vsetvl) SHAPE(vsetvl, vsetvlmax) SHAPE(loadstore, loadstore) @@ -1355,4 +1392,5 @@ SHAPE(crypto_vv, crypto_vv) SHAPE(crypto_vi, crypto_vi) SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type) SHAPE(sf_vqmacc, sf_vqmacc) +SHAPE(sf_vfnrclip, sf_vfnrclip) } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h index 0a3473cfa23f..46d5cb4d24bd 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.h +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h @@ -60,6 +60,7 @@ extern const function_shape *const crypto_vv; extern const function_shape *const crypto_vi; extern const function_shape *const crypto_vv_no_op_type; extern const function_shape *const sf_vqmacc; +extern const function_shape *const sf_vfnrclip; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index ffaec36192d5..6c10811ff5d3 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ 
b/gcc/config/riscv/riscv-vector-builtins.cc @@ -712,6 +712,11 @@ static CONSTEXPR const rvv_arg_type_info shift_wv_args[] rvv_arg_type_info (RVV_BASE_double_trunc_unsigned_vector), rvv_arg_type_info_end}; +static CONSTEXPR const rvv_arg_type_info clip_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_double_trunc_unsigned_vector), + rvv_arg_type_info_end}; + /* A list of args for vector_type func (vector_type) function. */ static CONSTEXPR const rvv_arg_type_info v_args[] = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end}; @@ -2522,6 +2527,22 @@ static CONSTEXPR const rvv_op_info i_narrow_shift_vwx_ops rvv_arg_type_info (RVV_BASE_double_trunc_vector), /* Return type */ v_size_args /* Args */}; +/* A static operand information for double demote type func (vector_type, + * shift_type) function registration. */ +static CONSTEXPR const rvv_op_info u_clip_qf_ops + = {wextu_ops, /* Types */ + OP_TYPE_f_qf, /* Suffix */ + rvv_arg_type_info (RVV_BASE_double_trunc_unsigned_vector), /* Return type */ + clip_args /* Args */}; + +/* A static operand information for double demote type func (vector_type, + * shift_type) function registration. */ +static CONSTEXPR const rvv_op_info i_clip_qf_ops + = {wexti_ops, /* Types */ + OP_TYPE_f_qf, /* Suffix */ + rvv_arg_type_info (RVV_BASE_double_trunc_signed_vector), /* Return type */ + clip_args /* Args */}; + /* A static operand information for double demote type func (vector_type, * size_t) function registration. 
*/ static CONSTEXPR const rvv_op_info u_narrow_shift_vwx_ops diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index 89381d7b9ec0..759a8ea82e61 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -636,6 +636,7 @@ DEF_RVV_OP_TYPE (xu_w) DEF_RVV_OP_TYPE (s) DEF_RVV_OP_TYPE (4x8x4) DEF_RVV_OP_TYPE (2x8x2) +DEF_RVV_OP_TYPE (f_qf) DEF_RVV_PRED_TYPE (ta) DEF_RVV_PRED_TYPE (tu) diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h index fec024d9f948..d6fab90fbdf3 100644 --- a/gcc/config/riscv/riscv-vector-builtins.h +++ b/gcc/config/riscv/riscv-vector-builtins.h @@ -129,6 +129,7 @@ enum required_ext ZVFBFWMA_EXT, /* Zvfbfwma extension */ XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */ XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */ + XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension*/ /* Please update below to isa_name func when add or remove enum type(s). */ }; @@ -166,6 +167,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required) return "xsfvqmaccqoq"; case XSFVQMACCDOD_EXT: return "xsfvqmaccdod"; + case XSFVFNRCLIPXFQF_EXT: + return "xsfvfnrclipxfqf"; default: gcc_unreachable (); } @@ -207,6 +210,8 @@ static inline bool required_extensions_specified (enum required_ext required) return TARGET_XSFVQMACCQOQ; case XSFVQMACCDOD_EXT: return TARGET_XSFVQMACCDOD; + case XSFVFNRCLIPXFQF_EXT: + return TARGET_XSFVFNRCLIPXFQF; default: gcc_unreachable (); } @@ -351,6 +356,8 @@ struct function_group_info return TARGET_XSFVQMACCQOQ; case XSFVQMACCDOD_EXT: return TARGET_XSFVQMACCDOD; + case XSFVFNRCLIPXFQF_EXT: + return TARGET_XSFVFNRCLIPXFQF; default: gcc_unreachable (); } @@ -570,6 +577,9 @@ class function_base /* Return true if intrinsics maybe require vxrm operand. */ virtual bool may_require_vxrm_p () const; +/* Return true if intrinsics maybe require qfrm operand. 
*/ + virtual bool may_require_qfrm_p () const; + /* Return true if intrinsics maybe require frm operand. */ virtual bool may_require_frm_p () const; @@ -846,6 +856,14 @@ function_base::may_require_vxrm_p () const return false; } +/* We choose to return false by default since most of the intrinsics does + not need qfrm operand. */ +inline bool +function_base::may_require_qfrm_p () const +{ + return false; +} + /* Since most of intrinsics can be overloaded, we set it true by default. */ inline bool function_base::can_be_overloaded_p (enum predication_type_index) const diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md index 0eb23e3a349a..ae76a755adf0 100644 --- a/gcc/config/riscv/riscv.md +++ b/gcc/config/riscv/riscv.md @@ -391,6 +391,7 @@ ;; vsmul vector single-width fractional multiply with rounding and saturation instructions ;; vsshift vector single-width scaling shift instructions ;; vnclip vector narrowing fixed-point clip instructions +;; vsfclip vector fp32 to int8 ranged clip instructions ;; 13. 
Vector floating-point instructions ;; vfalu vector single-width floating-point add/subtract instructions ;; vfwalu vector widening floating-point add/subtract instructions @@ -487,7 +488,7 @@ vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff, vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax, vimul,vidiv,viwmul,vimuladd,vsfmuladd,viwmuladd,vimerge,vimov, - vsalu,vaalu,vsmul,vsshift,vnclip, + vsalu,vaalu,vsmul,vsshift,vnclip,vsfclip, vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp, vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov, vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi, diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def b/gcc/config/riscv/sifive-vector-builtins-functions.def index b3ef445dba78..96cda5c68d90 100644 --- a/gcc/config/riscv/sifive-vector-builtins-functions.def +++ b/gcc/config/riscv/sifive-vector-builtins-functions.def @@ -16,4 +16,10 @@ DEF_RVV_FUNCTION (vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qdvv_ops) DEF_RVV_FUNCTION (vqmaccus, sf_vqmacc, none_tu_preds, i_us_qdvv_ops) #undef REQUIRED_EXTENSIONS +#define REQUIRED_EXTENSIONS XSFVFNRCLIPXFQF_EXT +DEF_RVV_FUNCTION (xu, sf_vfnrclip, full_preds, u_clip_qf_ops) +DEF_RVV_FUNCTION (x, sf_vfnrclip, full_preds, i_clip_qf_ops) + +#undef REQUIRED_EXTENSIONS + #undef DEF_RVV_FUNCTION diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md index 01ffa6f13f78..a8677faed42a 100644 --- a/gcc/config/riscv/sifive-vector.md +++ b/gcc/config/riscv/sifive-vector.md @@ -141,3 +141,50 @@ "sf.vqmaccus.4x8x4\t%0,%3,%4" [(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) + +;; CLIP +(define_insn "@pred_fnr_clip" + [(set (match_operand: 0 "register_operand" "=vd,vd, vr, vr,vd, vr, &vr, &vr, vd, vr, &vr, &vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " vm,vm,Wc1,Wc1,vm,Wc1,vmWc1,vmWc1, vm,Wc1,vmWc1,vmWc1") + (match_operand 5 "vector_length_operand" " rK,rK, rK, rK,rK, rK, rK, rK, rK, rK, rK, rK") + 
(match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:VWEXTI 3 "register_operand" " vr,vr, vr, vr, 0, 0, vr, vr, 0, 0, vr, vr") + (match_operand: 4 "vector_shift_operand" " 0, 0, 0, 0,vr, vr, vr, vr, vk, vk, vk, vk")] VNCLIP) + (match_operand: 2 "vector_merge_operand" " 0,vu, 0, vu,vu, vu, vu, 0, vu, vu, vu, 0")))] + "TARGET_VECTOR && TARGET_XSFVFNRCLIPXFQF" + "sf.vfnrclip.x.f.qf%o4\t%0,%3,%v4%p1" + [(set_attr "type" "vsfclip") + (set_attr "mode" "") + (set_attr "spec_restriction" "thv,thv,thv,thv,thv,thv,none,none,thv,thv,none,none")]) + +(define_insn "@pred_fnr_clip_scalar" + [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, &vr, &vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " vm, vm,Wc1,Wc1,vmWc1,vmWc1") + (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK") + (match_operand 6 "const_int_operand" " i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i") + (match_operand 8 "const_int_operand" " i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:VWEXTI 3 "register_operand" " 0, 0, 0, 0, vr, vr") + (match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK, rK, rK, rK, rK")] VNCLIP) + (match_operand: 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))] + "TARGET_VECTOR && TARGET_XSFVFNRCLIPXFQF" + "sf.vfnrclip.xu.f.qf%o4\t%0,%3,%4%p1" + [(set_attr "type" "vsfclip") + (set_attr "mode" "") + (set_attr "spec_restriction" "thv,thv,thv,thv,none,none")]) From 
b64528a82c6f1ea00608aea22cb25ad6f1810155 Mon Sep 17 00:00:00 2001 From: yulong Date: Tue, 29 Oct 2024 11:07:13 +0800 Subject: [PATCH 06/10] Rename intrinsics function basename --- .../riscv/riscv-vector-builtins-bases.cc | 55 ++++++++++--------- .../riscv/riscv-vector-builtins-bases.h | 12 ++-- .../riscv/riscv-vector-builtins-shapes.cc | 9 +-- gcc/config/riscv/riscv-vector-builtins.cc | 11 ++-- gcc/config/riscv/riscv-vector-builtins.def | 3 +- .../sifive-vector-builtins-functions.def | 20 +++---- gcc/config/riscv/sifive-vector.md | 12 ++-- 7 files changed, 63 insertions(+), 59 deletions(-) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index 51337e5a3754..55bd3a92c9e3 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -721,12 +721,17 @@ class vfnrclip : public function_base rtx expand (function_expander &e) const override { - if (e.op_info->op == OP_TYPE_f_qf) - { - return e.use_exact_insn ( + if (e.op_info->op == OP_TYPE_x_f_qf) + { + return e.use_exact_insn ( code_for_pred_fnr_clip (ZERO_EXTEND, e.vector_mode ())); - gcc_unreachable (); - } + } + if (e.op_info->op == OP_TYPE_xu_f_qf) + { + return e.use_exact_insn ( + code_for_pred_fnr_clip_scalar (ZERO_EXTEND, e.vector_mode ())); + } + gcc_unreachable (); } }; @@ -908,10 +913,10 @@ class vqmacc : public function_base { if (e.op_info->op == OP_TYPE_4x8x4) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ())); + code_for_pred_matrix_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ())); if (e.op_info->op == OP_TYPE_2x8x2) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plus_dod (SIGN_EXTEND, e.vector_mode ())); + code_for_pred_matrix_mul_plus_dod (SIGN_EXTEND, e.vector_mode ())); gcc_unreachable (); } }; @@ -930,10 +935,10 @@ class vqmaccu : public function_base { if (e.op_info->op == OP_TYPE_4x8x4) return e.use_widen_ternop_insn ( - 
code_for_pred_quad_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ())); + code_for_pred_matrix_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ())); if (e.op_info->op == OP_TYPE_2x8x2) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plus_dod (SIGN_EXTEND, e.vector_mode ())); + code_for_pred_matrix_mul_plus_dod (ZERO_EXTEND, e.vector_mode ())); gcc_unreachable (); } }; @@ -952,10 +957,10 @@ class vqmaccsu : public function_base { if (e.op_info->op == OP_TYPE_4x8x4) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plussu_qoq (e.vector_mode ())); + code_for_pred_matrix_mul_plussu_qoq (e.vector_mode ())); if (e.op_info->op == OP_TYPE_2x8x2) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plussu_dod (e.vector_mode ())); + code_for_pred_matrix_mul_plussu_dod (e.vector_mode ())); gcc_unreachable (); } }; @@ -974,10 +979,10 @@ class vqmaccus : public function_base { if (e.op_info->op == OP_TYPE_4x8x4) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plusus_qoq (e.vector_mode ())); + code_for_pred_matrix_mul_plusus_qoq (e.vector_mode ())); if (e.op_info->op == OP_TYPE_2x8x2) return e.use_widen_ternop_insn ( - code_for_pred_quad_mul_plusus_dod (e.vector_mode ())); + code_for_pred_matrix_mul_plusus_dod (e.vector_mode ())); gcc_unreachable (); } }; @@ -2686,8 +2691,8 @@ static CONSTEXPR const sat_op vssrl_obj; static CONSTEXPR const sat_op vssra_obj; static CONSTEXPR const vnclip vnclip_obj; static CONSTEXPR const vnclip vnclipu_obj; -static CONSTEXPR const vfnrclip x_obj; -static CONSTEXPR const vfnrclip xu_obj; +static CONSTEXPR const vfnrclip sf_vfnrclip_x_obj; +static CONSTEXPR const vfnrclip sf_vfnrclip_xu_obj; static CONSTEXPR const mask_logic vmand_obj; static CONSTEXPR const mask_nlogic vmnand_obj; static CONSTEXPR const mask_notlogic vmandn_obj; @@ -2877,10 +2882,10 @@ static CONSTEXPR const th_loadstore_width vs static CONSTEXPR const th_loadstore_width vsuxh_obj; static CONSTEXPR const th_loadstore_width vsuxw_obj; static CONSTEXPR const
th_extract vext_x_v_obj; -static CONSTEXPR const vqmacc vqmacc_obj; -static CONSTEXPR const vqmaccu vqmaccu_obj; -static CONSTEXPR const vqmaccsu vqmaccsu_obj; -static CONSTEXPR const vqmaccsu vqmaccus_obj; +static CONSTEXPR const vqmacc sf_vqmacc_obj; +static CONSTEXPR const vqmaccu sf_vqmaccu_obj; +static CONSTEXPR const vqmaccsu sf_vqmaccsu_obj; +static CONSTEXPR const vqmaccus sf_vqmaccus_obj; /* Crypto Vector */ static CONSTEXPR const vandn vandn_obj; @@ -3022,8 +3027,8 @@ BASE (vssra) BASE (vssrl) BASE (vnclip) BASE (vnclipu) -BASE (x) -BASE (xu) +BASE (sf_vfnrclip_x) +BASE (sf_vfnrclip_xu) BASE (vmand) BASE (vmnand) BASE (vmandn) @@ -3213,10 +3218,10 @@ BASE (vsuxb) BASE (vsuxh) BASE (vsuxw) BASE (vext_x_v) -BASE (vqmacc) -BASE (vqmaccu) -BASE (vqmaccsu) -BASE (vqmaccus) +BASE (sf_vqmacc) +BASE (sf_vqmaccu) +BASE (sf_vqmaccsu) +BASE (sf_vqmaccus) /* Crypto vector */ BASE (vandn) BASE (vbrev) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h index a2e6b0b35ec6..1ac3f073bcf6 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.h +++ b/gcc/config/riscv/riscv-vector-builtins-bases.h @@ -122,8 +122,8 @@ extern const function_base *const vssra; extern const function_base *const vssrl; extern const function_base *const vnclip; extern const function_base *const vnclipu; -extern const function_base *const x; -extern const function_base *const xu; +extern const function_base *const sf_vfnrclip_x; +extern const function_base *const sf_vfnrclip_xu; extern const function_base *const vmand; extern const function_base *const vmnand; extern const function_base *const vmandn; @@ -313,10 +313,10 @@ extern const function_base *const vsuxb; extern const function_base *const vsuxh; extern const function_base *const vsuxw; extern const function_base *const vext_x_v; -extern const function_base *const vqmacc; -extern const function_base *const vqmaccu; -extern const function_base *const vqmaccsu; -extern const
function_base *const vqmaccus; +extern const function_base *const sf_vqmacc; +extern const function_base *const sf_vqmaccu; +extern const function_base *const sf_vqmaccsu; +extern const function_base *const sf_vqmaccus; /* Below function_base are Vectro Crypto*/ extern const function_base *const vandn; extern const function_base *const vbrev; diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index 3fe7ba4898d1..8baeb33f3fe0 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1297,8 +1297,7 @@ struct sf_vqmacc_def : public build_base if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) return nullptr; - b.append_name ("__riscv_sf_"); - b.append_name (instance.base_name); + b.append_base_name (instance.base_name); /* vop_v --> vop_v_. */ if (!overloaded_p) @@ -1330,14 +1329,12 @@ struct sf_vfnrclip_def : public build_base if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) return nullptr; - b.append_name ("__riscv_sf_vfnrclip_"); - printf("aaaaaa %s\n", instance.base_name); - b.append_name (instance.base_name); + b.append_base_name (instance.base_name); if (!overloaded_p) { /* vop --> vop_. */ - b.append_name (operand_suffixes[instance.op_info->op]); + b.append_name ("_f_qf"); /* vop_v --> vop_v_. */ b.append_name (type_suffixes[instance.type.index].vector); /* vop_ --> vop__. diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index 6c10811ff5d3..88882d35b93e 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ b/gcc/config/riscv/riscv-vector-builtins.cc @@ -2530,16 +2530,17 @@ static CONSTEXPR const rvv_op_info i_narrow_shift_vwx_ops /* A static operand information for double demote type func (vector_type, * shift_type) function registration. 
*/ static CONSTEXPR const rvv_op_info u_clip_qf_ops - = {wextu_ops, /* Types */ - OP_TYPE_f_qf, /* Suffix */ - rvv_arg_type_info (RVV_BASE_double_trunc_unsigned_vector), /* Return type */ + = {wextu_ops, /* Types */ + OP_TYPE_xu_f_qf, /* Suffix */ + rvv_arg_type_info ( + RVV_BASE_double_trunc_unsigned_vector), /* Return type */ clip_args /* Args */}; /* A static operand information for double demote type func (vector_type, * shift_type) function registration. */ static CONSTEXPR const rvv_op_info i_clip_qf_ops - = {wexti_ops, /* Types */ - OP_TYPE_f_qf, /* Suffix */ + = {wexti_ops, /* Types */ + OP_TYPE_x_f_qf, /* Suffix */ rvv_arg_type_info (RVV_BASE_double_trunc_signed_vector), /* Return type */ clip_args /* Args */}; diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index 759a8ea82e61..ee899f77fcb9 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -636,7 +636,8 @@ DEF_RVV_OP_TYPE (xu_w) DEF_RVV_OP_TYPE (s) DEF_RVV_OP_TYPE (4x8x4) DEF_RVV_OP_TYPE (2x8x2) -DEF_RVV_OP_TYPE (f_qf) +DEF_RVV_OP_TYPE (x_f_qf) +DEF_RVV_OP_TYPE (xu_f_qf) DEF_RVV_PRED_TYPE (ta) DEF_RVV_PRED_TYPE (tu) diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def b/gcc/config/riscv/sifive-vector-builtins-functions.def index 96cda5c68d90..12bc11a413f6 100644 --- a/gcc/config/riscv/sifive-vector-builtins-functions.def +++ b/gcc/config/riscv/sifive-vector-builtins-functions.def @@ -3,22 +3,22 @@ #endif #define REQUIRED_EXTENSIONS XSFVQMACCQOQ_EXT -DEF_RVV_FUNCTION (vqmaccu, sf_vqmacc, none_tu_preds, u_qqvv_ops) -DEF_RVV_FUNCTION (vqmacc, sf_vqmacc, none_tu_preds, i_qqvv_ops) -DEF_RVV_FUNCTION (vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qqvv_ops) -DEF_RVV_FUNCTION (vqmaccus, sf_vqmacc, none_tu_preds, i_us_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccu, sf_vqmacc, none_tu_preds, u_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmacc, sf_vqmacc, none_tu_preds, i_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccsu, 
sf_vqmacc, none_tu_preds, i_su_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccus, sf_vqmacc, none_tu_preds, i_us_qqvv_ops) #undef REQUIRED_EXTENSIONS #define REQUIRED_EXTENSIONS XSFVQMACCDOD_EXT -DEF_RVV_FUNCTION (vqmaccu, sf_vqmacc, none_tu_preds, u_qdvv_ops) -DEF_RVV_FUNCTION (vqmacc, sf_vqmacc, none_tu_preds, i_qdvv_ops) -DEF_RVV_FUNCTION (vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qdvv_ops) -DEF_RVV_FUNCTION (vqmaccus, sf_vqmacc, none_tu_preds, i_us_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccu, sf_vqmacc, none_tu_preds, u_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmacc, sf_vqmacc, none_tu_preds, i_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccus, sf_vqmacc, none_tu_preds, i_us_qdvv_ops) #undef REQUIRED_EXTENSIONS #define REQUIRED_EXTENSIONS XSFVFNRCLIPXFQF_EXT -DEF_RVV_FUNCTION (xu, sf_vfnrclip, full_preds, u_clip_qf_ops) -DEF_RVV_FUNCTION (x, sf_vfnrclip, full_preds, i_clip_qf_ops) +DEF_RVV_FUNCTION (sf_vfnrclip_xu, sf_vfnrclip, full_preds, u_clip_qf_ops) +DEF_RVV_FUNCTION (sf_vfnrclip_x, sf_vfnrclip, full_preds, i_clip_qf_ops) #undef REQUIRED_EXTENSIONS diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md index a8677faed42a..c48e6c150eca 100644 --- a/gcc/config/riscv/sifive-vector.md +++ b/gcc/config/riscv/sifive-vector.md @@ -1,4 +1,4 @@ -(define_insn "@pred_quad_mul_plus_qoq" +(define_insn "@pred_matrix_mul_plus_qoq" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: @@ -22,7 +22,7 @@ [(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) -(define_insn "@pred_quad_mul_plussu_qoq" +(define_insn "@pred_matrix_mul_plussu_qoq" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: @@ -46,7 +46,7 @@ [(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) -(define_insn "@pred_quad_mul_plusus_qoq" +(define_insn "@pred_matrix_mul_plusus_qoq" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: @@ -70,7 +70,7 @@ 
[(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) -(define_insn "@pred_quad_mul_plus_dod" +(define_insn "@pred_matrix_mul_plus_dod" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: @@ -94,7 +94,7 @@ [(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) -(define_insn "@pred_quad_mul_plussu_dod" +(define_insn "@pred_matrix_mul_plussu_dod" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: @@ -118,7 +118,7 @@ [(set_attr "type" "vsfmuladd") (set_attr "mode" "")]) -(define_insn "@pred_quad_mul_plusus_dod" +(define_insn "@pred_matrix_mul_plusus_dod" [(set (match_operand:V_SF 0 "register_operand" "=&vr") (if_then_else:V_SF (unspec: From be28eaa4c8a04e6b707ac5e98972360c6f465417 Mon Sep 17 00:00:00 2001 From: yulong Date: Tue, 29 Oct 2024 11:10:12 +0800 Subject: [PATCH 07/10] Add some temporary test cases --- .../gcc.target/riscv/rvv/sf_vfnrclip.c | 21 +++++++++++++++++++ .../gcc.target/riscv/rvv/sf_vqmacc.c | 17 +++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/sf_vfnrclip.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/sf_vqmacc.c diff --git a/gcc/testsuite/gcc.target/riscv/rvv/sf_vfnrclip.c b/gcc/testsuite/gcc.target/riscv/rvv/sf_vfnrclip.c new file mode 100644 index 000000000000..2dbc87bb479f --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/sf_vfnrclip.c @@ -0,0 +1,21 @@ +#include "riscv_vector.h" + +vint8mf8_t test1(float vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, vs1, vl); +} + +vint8mf4_t test2(float vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, vs1, vl); +} + +vint8mf2_t test3(float vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, vs1, vl); +} + +vint8m1_t test4(float vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, vs1, vl); +} + +vint8m2_t test5(float vs1, vfloat32m8_t vs2, size_t vl) {
+ return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, vs1, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/sf_vqmacc.c b/gcc/testsuite/gcc.target/riscv/rvv/sf_vqmacc.c new file mode 100644 index 000000000000..44439ac21a4d --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/sf_vqmacc.c @@ -0,0 +1,17 @@ +#include "riscv_vector.h" + +vint32m1_t test1(vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_sf_vqmacc_4x8x4_i32m1(vd, vs1, vs2, vl); +} + +vint32m2_t test2(vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_sf_vqmacc_4x8x4_i32m2(vd, vs1, vs2, vl); +} + +vint32m4_t test3(vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_sf_vqmacc_4x8x4_i32m4(vd, vs1, vs2, vl); +} + +vint32m8_t test4(vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_sf_vqmacc_4x8x4_i32m8(vd, vs1, vs2, vl); +} From 584fce953731a5f80efe4e5c2432af311dfb09f8 Mon Sep 17 00:00:00 2001 From: yulong Date: Mon, 4 Nov 2024 17:58:40 +0800 Subject: [PATCH 08/10] Add V_SF_FNRCLIPXFQF and V_SF_FNRCLIP iterator --- .../riscv/riscv-vector-builtins-bases.cc | 1 - gcc/config/riscv/sifive-vector.md | 12 +++++------ gcc/config/riscv/vector-iterators.md | 20 +++++++++++++++++++ 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index 55bd3a92c9e3..b62b061578b7 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -710,7 +710,6 @@ class vfnrclip : public function_base { public: bool has_merge_operand_p () const override { return false; } - bool may_require_qfrm_p () const override { return true; } bool can_be_overloaded_p (enum predication_type_index pred) const override { if (pred == PRED_TYPE_mu || pred == PRED_TYPE_tu || pred == PRED_TYPE_tumu) diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md index 
c48e6c150eca..3bdb162398d7 100644 --- a/gcc/config/riscv/sifive-vector.md +++ b/gcc/config/riscv/sifive-vector.md @@ -144,8 +144,8 @@ ;; CLIP (define_insn "@pred_fnr_clip" - [(set (match_operand: 0 "register_operand" "=vd,vd, vr, vr,vd, vr, &vr, &vr, vd, vr, &vr, &vr") - (if_then_else: + [(set (match_operand: 0 "register_operand" "=vd,vd, vr, vr,vd, vr, &vr, &vr, vd, vr, &vr, &vr") + (if_then_else: (unspec: [(match_operand: 1 "vector_mask_operand" " vm,vm,Wc1,Wc1,vm,Wc1,vmWc1,vmWc1, vm,Wc1,vmWc1,vmWc1") (match_operand 5 "vector_length_operand" " rK,rK, rK, rK,rK, rK, rK, rK, rK, rK, rK, rK") @@ -156,14 +156,14 @@ (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM) (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) - (unspec: + (unspec: [(match_operand:VWEXTI 3 "register_operand" " vr,vr, vr, vr, 0, 0, vr, vr, 0, 0, vr, vr") - (match_operand: 4 "vector_shift_operand" " 0, 0, 0, 0,vr, vr, vr, vr, vk, vk, vk, vk")] VNCLIP) - (match_operand: 2 "vector_merge_operand" " 0,vu, 0, vu,vu, vu, vu, 0, vu, vu, vu, 0")))] + (match_operand: 4 "vector_shift_operand" " 0, 0, 0, 0,vr, vr, vr, vr, vk, vk, vk, vk")] VNCLIP) + (match_operand: 2 "vector_merge_operand" " 0,vu, 0, vu,vu, vu, vu, 0, vu, vu, vu, 0")))] "TARGET_VECTOR && TARGET_XSFVFNRCLIPXFQF" "sf.vfnrclip.x.f.qf%o4\t%0,%3,%v4%p1" [(set_attr "type" "vsfclip") - (set_attr "mode" "") + (set_attr "mode" "") (set_attr "spec_restriction" "thv,thv,thv,thv,thv,thv,none,none,thv,thv,none,none")]) (define_insn "@pred_fnr_clip_scalar" diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 0de750e01120..e62d6f579435 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -4530,3 +4530,23 @@ (RVVM2SI "rvvm1qi") (RVVM1SI "rvvmf2qi") ]) + +(define_mode_iterator V_SF_FNRCLIP [ + RVVMF8QI RVVMF4QI RVVMF2QI RVVM1QI RVVM2QI +]) + +(define_mode_attr V_SF_FNRCLIPXFQF [ + (RVVMF2SF "RVVMF8QI") + (RVVM1SF "RVVMF4QI") + (RVVM2SF "RVVMF2QI") + (RVVM4SF "RVVM1QI") + (RVVM8SF "RVVM2QI") +]) 
+ +(define_mode_attr v_sf_fnrclipxfqf [ + (RVVMF2SF "rvvmf8qi") + (RVVM1SF "rvvmf4qi") + (RVVM2SF "rvvmf2qi") + (RVVM4SF "rvvm1qi") + (RVVM8SF "rvvm2qi") +]) From 0f056771c29cb1539227698bdd2eeac57d7c76a5 Mon Sep 17 00:00:00 2001 From: Liao Shihua Date: Fri, 15 Nov 2024 11:15:28 +0800 Subject: [PATCH 09/10] Add SiFive Vqmacc Extension testcase --- gcc/testsuite/gcc.target/riscv/arch-44.c | 6 + gcc/testsuite/gcc.target/riscv/arch-45.c | 5 + .../riscv/rvv/xsifivevector/vqmaccdod.c | 838 ++++++++++++++++++ .../riscv/rvv/xsifivevector/vqmaccqoq.c | 448 ++++++++++ 4 files changed, 1297 insertions(+) create mode 100644 gcc/testsuite/gcc.target/riscv/arch-44.c create mode 100644 gcc/testsuite/gcc.target/riscv/arch-45.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccdod.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccqoq.c diff --git a/gcc/testsuite/gcc.target/riscv/arch-44.c b/gcc/testsuite/gcc.target/riscv/arch-44.c new file mode 100644 index 000000000000..f01fa91aa0bd --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/arch-44.c @@ -0,0 +1,6 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccqoq -mabi=lp64d" } */ +int foo() +{ +} + diff --git a/gcc/testsuite/gcc.target/riscv/arch-45.c b/gcc/testsuite/gcc.target/riscv/arch-45.c new file mode 100644 index 000000000000..6942cd1fca91 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/arch-45.c @@ -0,0 +1,5 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccdod -mabi=lp64d" } */ +int foo() +{ +} \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccdod.c b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccdod.c new file mode 100644 index 000000000000..8c01c2e55b64 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccdod.c @@ -0,0 +1,838 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gc_xsfvqmaccdod -mabi=ilp32d -O3" } */ +/* { dg-final { 
check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** foo1: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo1(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo2: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo2(vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo3: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo3(vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo4: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo4(vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo5: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo5(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo6: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo6(vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo7: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo7(vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo8: +** ... +** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/
+
+vint32m8_t foo8(vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2(vd, vs1, vs2, vl);
+}
+
+/*
+** foo9:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo9(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m1_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo10:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m2_t foo10(vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo11:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m4_t foo11(vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo12:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m8_t foo12(vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m8_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo13:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo13(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo14:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m2_t foo14(vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo15:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m4_t foo15(vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo16:
+** ...
+** vsetvli\t +** sf\.vqmacc\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo16(vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo17: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo17(vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo18: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo18(vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo19: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo19(vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo20: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo20(vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo21: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo21(vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo22: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo22(vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo23: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/
+
+vint32m4_t foo23(vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2(vd, vs1, vs2, vl);
+}
+
+/*
+** foo24:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m8_t foo24(vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2(vd, vs1, vs2, vl);
+}
+
+/*
+** foo25:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo25(vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2_i32m1_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo26:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m2_t foo26(vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2_i32m2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo27:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m4_t foo27(vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2_i32m4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo28:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m8_t foo28(vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2_i32m8_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo29:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo29(vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmaccsu_2x8x2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo30:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/ + +vint32m2_t foo30(vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo31: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo31(vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo32: +** ... +** vsetvli\t +** sf\.vqmaccsu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo32(vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo33: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo33(vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo34: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo34(vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo35: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo35(vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo36: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo36(vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo37: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo37(vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo38: +** ... 
+** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo38(vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo39: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo39(vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo40: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo40(vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo41: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo41(vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m1_tu(vd, vs1, vs2, vl); +} + +/* +** foo42: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo42(vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m2_tu(vd, vs1, vs2, vl); +} + +/* +** foo43: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo43(vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m4_tu(vd, vs1, vs2, vl); +} + +/* +** foo44: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo44(vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m8_tu(vd, vs1, vs2, vl); +} + +/* +** foo45: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint32m1_t foo45(vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo46: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo46(vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo47: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo47(vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo48: +** ... +** vsetvli\t +** sf\.vqmaccu\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo48(vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo49: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo49(vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo50: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo50(vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo51: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo51(vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo52: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo52(vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo53: +** ... 
+** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo53(vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo54: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo54(vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo55: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo55(vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo56: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo56(vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2(vd, vs1, vs2, vl); +} + +/* +** foo57: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo57(vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m1_tu(vd, vs1, vs2, vl); +} + +/* +** foo58: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo58(vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m2_tu(vd, vs1, vs2, vl); +} + +/* +** foo59: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo59(vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m4_tu(vd, vs1, vs2, vl); +} + +/* +** foo60: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint32m8_t foo60(vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m8_tu(vd, vs1, vs2, vl); +} + +/* +** foo61: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo61(vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo62: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo62(vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo63: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo63(vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu(vd, vs1, vs2, vl); +} + +/* +** foo64: +** ... +** vsetvli\t +** sf\.vqmaccus\.2x8x2\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo64(vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu(vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccqoq.c b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccqoq.c new file mode 100644 index 000000000000..09242f5c3d85 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vqmaccqoq.c @@ -0,0 +1,448 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gc_xsfvqmaccqoq -mabi=ilp32d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** foo1: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo1(vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo2: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint32m2_t foo2(vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo3: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo3(vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo4: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo4(vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo5: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo5(vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo6: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo6(vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo7: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo7(vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo8: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo8(vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo9: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo9(vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmacc_4x8x4_i32m1_tu(vd, vs1, vs2, vl); +} + +/* +** foo10: +** ... +** vsetvli\t +** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/
+
+vint32m2_t foo10(vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_i32m2_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo11:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m4_t foo11(vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_i32m4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo12:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m8_t foo12(vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_i32m8_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo13:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo13(vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo14:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m2_t foo14(vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo15:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m4_t foo15(vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo16:
+** ...
+** vsetvli\t
+** sf\.vqmacc\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m8_t foo16(vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmacc_4x8x4_tu(vd, vs1, vs2, vl);
+}
+
+/*
+** foo17:
+** ...
+** vsetvli\t
+** sf\.vqmaccsu\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vint32m1_t foo17(vint32m1_t vd, vint8m1_t vs1, vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_sf__vqmaccsu_4x8x4_i32m1(vd, vs1, vs2, vl);
+}
+
+/*
+** foo18:
+** ...
+** vsetvli\t
+** sf.vqmaccu.4x8x4\t
+** ...
+*/ + +vint32m8_t foo18(vint32m8_t vd, vuint8m1_t vs1, vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccu_4x8x4_tu(vd, vs1, vs2, vl); +} + +/* +** foo19: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo19(vint32m1_t vd, vuint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m1(vd, vs1, vs2, vl); +} + +/* +** foo20: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo20(vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m2(vd, vs1, vs2, vl); +} + +/* +** foo21: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo21(vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m4(vd, vs1, vs2, vl); +} + +/* +** foo22: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo22(vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m8(vd, vs1, vs2, vl); +} + +/* +** foo23: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo23(vint32m1_t vd, vuint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo24: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo24(vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo25: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo25(vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo26: +** ... 
+** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo26(vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4(vd, vs1, vs2, vl); +} + +/* +** foo27: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo27(vint32m1_t vd, vuint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m1_tu(vd, vs1, vs2, vl); +} + +/* +** foo28: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo28(vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m2_tu(vd, vs1, vs2, vl); +} + +/* +** foo29: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m4_t foo29(vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m4_tu(vd, vs1, vs2, vl); +} + +/* +** foo30: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo30(vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m8_tu(vd, vs1, vs2, vl); +} + +/* +** foo31: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m1_t foo31(vint32m1_t vd, vuint8m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m1_tu(vd, vs1, vs2, vl); +} + +/* +** foo32: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m2_t foo32(vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m2_tu(vd, vs1, vs2, vl); +} + +/* +** foo33: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint32m4_t foo33(vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m4_tu(vd, vs1, vs2, vl); +} + +/* +** foo34: +** ... +** vsetvli\t +** sf\.vqmaccus\.4x8x4\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint32m8_t foo34(vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, size_t vl) +{ + return __riscv_sf__vqmaccus_4x8x4_i32m8_tu(vd, vs1, vs2, vl); +} + From 47702143406990de4aba70cc5ef2aa0e6b3e17ff Mon Sep 17 00:00:00 2001 From: Liao Shihua Date: Fri, 15 Nov 2024 11:17:29 +0800 Subject: [PATCH 10/10] Add SiFive Vfnrclip Extension testcase --- gcc/testsuite/gcc.target/riscv/arch-46.c | 6 + .../riscv/rvv/xsifivevector/vfnrclip.c | 1584 +++++++++++++++++ 2 files changed, 1590 insertions(+) create mode 100644 gcc/testsuite/gcc.target/riscv/arch-46.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vfnrclip.c diff --git a/gcc/testsuite/gcc.target/riscv/arch-46.c b/gcc/testsuite/gcc.target/riscv/arch-46.c new file mode 100644 index 000000000000..371a4e86b54b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/arch-46.c @@ -0,0 +1,6 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gc_xsfvfnrclip -mabi=lp64d -O3" } */ +int foo() +{ +} + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vfnrclip.c b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vfnrclip.c new file mode 100644 index 000000000000..1b5a3ea62631 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsifivevector/vfnrclip.c @@ -0,0 +1,1584 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gc_xsfvfnrclip -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** foo1: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo1(vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl); +} + +/* +** foo2: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo2(vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl); +} + +/* +** foo3: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo3(vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl); +} + +/* +** foo4: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo4(vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl); +} + +/* +** foo5: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo5(vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl); +} + +/* +** foo6: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo6(vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl); +} + +/* +** foo7: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo7(vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl); +} + +/* +** foo8: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo8(vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl); +} + +/* +** foo9: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo9(vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl); +} + +/* +** foo10: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/
+
+vint8m2_t foo10(vfloat32m8_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+/*
+** foo11:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8mf8_t foo11(vfloat32mf2_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8(vs2, rs1, vl);
+}
+
+/*
+** foo12:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8mf4_t foo12(vfloat32m1_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4(vs2, rs1, vl);
+}
+
+/*
+** foo13:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8mf2_t foo13(vfloat32m2_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2(vs2, rs1, vl);
+}
+
+/*
+** foo14:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8m1_t foo14(vfloat32m4_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1(vs2, rs1, vl);
+}
+
+/*
+** foo15:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8m2_t foo15(vfloat32m8_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2(vs2, rs1, vl);
+}
+
+/*
+** foo16:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8mf8_t foo16(vfloat32mf2_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+/*
+** foo17:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/
+
+vuint8mf4_t foo17(vfloat32m1_t vs2, float rs1, size_t vl)
+{
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+/*
+** foo18:
+** ...
+** vsetvli\t
+** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** ...
+*/ + +vuint8mf2_t foo18(vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo19: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo19(vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo20: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo20(vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + + +/* +** foo21: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo21(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl); +} + +/* +** foo22: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo22(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_m(mask, vs2, rs1, vl); +} + +/* +** foo23: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo23(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_m(mask, vs2, rs1, vl); +} + +/* +** foo24: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo24(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1_m(mask, vs2, rs1, vl); +} + +/* +** foo25: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo25(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2_m(mask, vs2, rs1, vl); +} + + +/* +** foo26: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo26(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_m(vs2, rs1, vl); +} + +/* +** foo27: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo27(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_m(vs2, rs1, vl); +} + +/* +** foo28: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo28(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_m(vs2, rs1, vl); +} + +/* +** foo29: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo29(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_m(vs2, rs1, vl); +} + +/* +** foo30: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo30(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_m(vs2, rs1, vl); +} + + +/* +** foo31: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo31(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo32: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo32(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo33: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint8mf2_t foo33(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo34: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo34(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo35: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo35(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tu(maskedoff, vs2, rs1, vl); +} + + +/* +** foo36: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo36(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo37: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo37(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo38: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo38(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo39: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo39(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo40: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint8m2_t foo40(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + + +/* +** foo41: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo41(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo42: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo42(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo43: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo43(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo44: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo44(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo45: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo45(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tu(maskedoff, vs2, rs1, vl); +} + + +/* +** foo46: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo46(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo47: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vuint8mf4_t foo47(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo48: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo48(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo49: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo49(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** foo50: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo50(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + + +/* +** foo51: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo51(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo52: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo52(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo53: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo53(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo54: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint8m1_t foo54(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo55: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo55(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo56: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo56(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo57: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo57(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo58: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo58(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo59: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo59(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo60: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo60(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo61: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo61(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo62: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo62(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo63: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo63(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo64: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo64(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo65: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo65(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo66: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo66(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo67: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vuint8mf4_t foo67(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo68: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo68(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo69: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo69(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo70: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo70(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tum(maskedoff, vs2, rs1, vl); +} + + +/* +** foo71: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo71(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo72: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo72(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo73: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo73(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo74: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo74(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo75: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo75(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo76: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo76(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo77: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo77(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo78: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo78(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo79: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo79(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo80: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint8m2_t foo80(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo81: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo81(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo82: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo82(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo83: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo83(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo84: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo84(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo85: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo85(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo86: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vuint8mf8_t foo86(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo87: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo87(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo88: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo88(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo89: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo89(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo90: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo90(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_tumu(maskedoff, vs2, rs1, vl); +} + +/* +** foo91: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo91(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo92: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo92(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo93: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo93(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo94: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo94(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo95: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo95(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_i8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo96: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo96(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo97: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo97(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo98: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo98(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo99: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vint8m1_t foo99(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo100: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo100(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo101: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo101(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo102: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo102(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo103: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo103(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo84: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo104(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo105: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vuint8m2_t foo105(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** foo106: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo106(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo87: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo107(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo108: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo108(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo109: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m1_t foo109(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** foo110: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qfs+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo110(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf_mu(maskedoff, vs2, rs1, vl); +} + + +/* +** foo111: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf8_t foo111(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl); +} + +/* +** foo112: +** ... 
+** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf4_t foo112(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl); +} + +/* +** foo113: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8mf2_t foo113(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl); +} + +/* +** foo114: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m1_t foo114(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl); +} + +/* +** foo115: +** ... +** vsetvli\t +** sf\.vfnrclip\.x\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vint8m2_t foo115(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl); +} + + +/* +** foo116: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf8_t foo116(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo117: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf4_t foo117(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo118: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8mf2_t foo118(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo119: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... 
+*/ + +vuint8m1_t foo119(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** foo120: +** ... +** vsetvli\t +** sf\.vfnrclip\.xu\.f\.qf\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t +** ... +*/ + +vuint8m2_t foo120(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) +{ + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +}