diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def
index bc852bfc984a2..01d26ffc6d762 100644
--- a/clang/include/clang/Basic/BuiltinsXtensa.def
+++ b/clang/include/clang/Basic/BuiltinsXtensa.def
@@ -124,7 +124,165 @@ BUILTIN(__builtin_xtensa_wsr_m3, "vUi", "n")
 BUILTIN(__builtin_xtensa_rsr_m3, "Ui", "n")
 BUILTIN(__builtin_xtensa_xsr_m3, "vUi*", "n")
+// Float intrinsics
+
+// float __builtin_xtensa_xt_movt_s(float frr, float frs, xtbool bt)
+BUILTIN(__builtin_xtensa_xt_movt_s, "fffE1b", "n")
+
+// float __builtin_xtensa_xt_movf_s(float frr, float frs, xtbool bt)
+BUILTIN(__builtin_xtensa_xt_movf_s, "fffE1b", "n")
+
+// xtbool __builtin_xtensa_xt_oeq_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_oeq_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_ole_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_ole_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_olt_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_olt_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_ueq_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_ueq_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_ule_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_ule_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_ult_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_ult_s, "E1bff", "n")
+
+// xtbool __builtin_xtensa_xt_un_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_un_s, "E1bff", "n")
+
+// xtfloat __builtin_xtensa_xt_sub_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_sub_s, "fff", "n")
+
+// xtfloat __builtin_xtensa_xt_add_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_add_s, "fff", "n")
+
+// xtfloat __builtin_xtensa_xt_abs_s(xtfloat frs)
+BUILTIN(__builtin_xtensa_xt_abs_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_mul_s(xtfloat frs,xtfloat frt)
+BUILTIN(__builtin_xtensa_xt_mul_s, "fff", "n")
+
+// int __builtin_xtensa_xt_trunc_s(xtfloat frs,immediate imm_t)
+BUILTIN(__builtin_xtensa_xt_trunc_s, "ifi", "n")
+
+// int __builtin_xtensa_xt_utrunc_s(xtfloat frs,immediate imm_t)
+BUILTIN(__builtin_xtensa_xt_utrunc_s, "ifi", "n")
+
+// xtfloat __builtin_xtensa_xt_float_s(int ars,immediate imm_t)
+BUILTIN(__builtin_xtensa_xt_float_s, "fii", "n")
+
+// xtfloat __builtin_xtensa_xt_ufloat_s(int ars,immediate imm_t)
+BUILTIN(__builtin_xtensa_xt_ufloat_s, "fii", "n")
+
+// xtfloat __builtin_xtensa_xt_addexp_s(xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_addexp_s, "fff", "n")
+
+// xtfloat __builtin_xtensa_xt_addexpm_s(xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_addexpm_s, "fff", "n")
+
+// int __builtin_xtensa_xt_ceil_s(xtfloat,immediate)
+BUILTIN(__builtin_xtensa_xt_ceil_s, "ifi", "n")
+
+// xtfloat __builtin_xtensa_xt_div0_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_div0_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_divn_s(xtfloat,xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_divn_s, "ffff", "n")
+
+// int __builtin_xtensa_xt_floor_s(xtfloat,immediate)
+BUILTIN(__builtin_xtensa_xt_floor_s, "ifi", "n")
+
+// xtfloat __builtin_xtensa_xt_lsi(const xtfloat*,immediate)
+BUILTIN(__builtin_xtensa_xt_lsi, "ff*i", "n")
+
+BUILTIN(__builtin_xtensa_xt_lsip, "ff**i", "n")
+
+// xtfloat __builtin_xtensa_xt_lsx(const xtfloat*,int)
+BUILTIN(__builtin_xtensa_xt_lsx, "ff*i", "n")
+
+BUILTIN(__builtin_xtensa_xt_lsxp, "ff**i", "n")
+
+// xtfloat __builtin_xtensa_xt_madd_s(xtfloat,xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_madd_s, "ffff", "n")
+
+// xtfloat __builtin_xtensa_xt_maddn_s(xtfloat,xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_maddn_s, "ffff", "n")
+
+// xtfloat __builtin_xtensa_xt_mkdadj_s(xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_mkdadj_s, "fff", "n")
+
+// xtfloat __builtin_xtensa_xt_mksadj_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_mksadj_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_mov_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_mov_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_moveqz_s(xtfloat,xtfloat,int)
+BUILTIN(__builtin_xtensa_xt_moveqz_s, "fffi", "n")
+
+// xtfloat __builtin_xtensa_xt_movgez_s(xtfloat,xtfloat,int)
+BUILTIN(__builtin_xtensa_xt_movgez_s, "fffi", "n")
+
+// xtfloat __builtin_xtensa_xt_movltz_s(xtfloat,xtfloat,int)
+BUILTIN(__builtin_xtensa_xt_movltz_s, "fffi", "n")
+
+// xtfloat __builtin_xtensa_xt_movnez_s(xtfloat,xtfloat,int)
+BUILTIN(__builtin_xtensa_xt_movnez_s, "fffi", "n")
+
+// xtfloat __builtin_xtensa_xt_msub_s(xtfloat,xtfloat,xtfloat)
+BUILTIN(__builtin_xtensa_xt_msub_s, "ffff", "n")
+
+// xtfloat __builtin_xtensa_xt_neg_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_neg_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_nexp01_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_nexp01_s, "ff", "n")
+
+// xtfloat __builtin_xtensa_xt_recip0_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_recip0_s, "ff", "n")
+
+// int __builtin_xtensa_xt_rfr(xtfloat)
+BUILTIN(__builtin_xtensa_xt_rfr, "if", "n")
+
+// int __builtin_xtensa_xt_round_s(xtfloat,immediate)
+BUILTIN(__builtin_xtensa_xt_round_s, "ifi", "n")
+
+// xtfloat __builtin_xtensa_xt_rsqrt0_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_rsqrt0_s, "ff", "n")
+
+// int __builtin_xtensa_xt_rur_fcr()
+BUILTIN(__builtin_xtensa_xt_rur_fcr, "i", "n")
+
+// int __builtin_xtensa_xt_rur_fsr()
+BUILTIN(__builtin_xtensa_xt_rur_fsr, "i", "n")
+
+// xtfloat __builtin_xtensa_xt_sqrt0_s(xtfloat)
+BUILTIN(__builtin_xtensa_xt_sqrt0_s, "ff", "n")
+
+// void __builtin_xtensa_xt_ssi(xtfloat,xtfloat*,immediate)
+BUILTIN(__builtin_xtensa_xt_ssi, "vff*i", "n")
+
+// xtfloat* __builtin_xtensa_xt_ssip(xtfloat,xtfloat*,immediate)
+BUILTIN(__builtin_xtensa_xt_ssip, "f*ff*i", "n")
+
+// void __builtin_xtensa_xt_ssx(xtfloat,xtfloat*,int)
+BUILTIN(__builtin_xtensa_xt_ssx, "vff*i", "n")
+
+// xtfloat* __builtin_xtensa_xt_ssxp(xtfloat,xtfloat*,int)
+BUILTIN(__builtin_xtensa_xt_ssxp, "f*ff*i", "n")
+
+// xtfloat __builtin_xtensa_xt_wfr(int)
+BUILTIN(__builtin_xtensa_xt_wfr, "fi", "n")
+
+// void __builtin_xtensa_xt_wur_fcr(int)
+BUILTIN(__builtin_xtensa_xt_wur_fcr, "vi", "n")
+
+// void __builtin_xtensa_xt_wur_fsr(int)
+BUILTIN(__builtin_xtensa_xt_wur_fsr, "vi", "n")
+
+// generated code
+#include "clang/Basic/BuiltinsXtensaESP32S3.def"
-#undef BUILTIN
\ No newline at end of file
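The prototype strings above follow clang's Builtins.def encoding: the first item is the return type and the rest are the parameters, so "fff" is float (float, float), "E1b" is a one-element ext_vector of bool (the xtbool in the comments), and "ff*i" is float (float *, int). The sketch below is illustrative only and not part of the patch: it shows the usual define/include/undef pattern through which a consumer reads these .def entries, followed by a hypothetical C use of a few of the new single-precision builtins. The table name, the clamp_add helper, and the assumption that xtbool is provided by the Xtensa target headers are ours, not the patch's; operand roles are taken from the signature comments above.

/* Sketch of the X-macro pattern a consumer of the .def database uses. */
#define BUILTIN(ID, TYPE, ATTRS) {#ID, TYPE, ATTRS},
static const struct { const char *Name, *Type, *Attrs; } XtensaBuiltinTable[] = {
#include "clang/Basic/BuiltinsXtensa.def"
};
#undef BUILTIN

/* Hypothetical use of the new float builtins; per the signature comments,
   xt_movt_s returns its second operand in place of the first when the
   xtbool flag is true (a MOVT.S-style conditional move). */
float clamp_add(float a, float b, float limit) {
  float sum = __builtin_xtensa_xt_add_s(a, b);         /* ADD.S */
  xtbool gt = __builtin_xtensa_xt_olt_s(limit, sum);   /* OLT.S: limit < sum */
  return __builtin_xtensa_xt_movt_s(sum, limit, gt);   /* MOVT.S */
}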
diff --git a/clang/include/clang/Basic/BuiltinsXtensaHIFI.def b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def
new file mode 100644
index 0000000000000..1ccfb0cdeda15
--- /dev/null
+++ b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def
@@ -0,0 +1,2619 @@
+//===-- BuiltinsXtensaHIFI.def - Xtensa HIFI Builtin function database ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtin function database for the Xtensa HIFI
+// extension. Users of this file must define the BUILTIN macro to make use of
+// this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+// ae_int16x4 __builtin_xtensa_ae_abs16s(ae_int16x4 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs16s, "V4sV4s", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_abs24s(ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs24s, "V2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_abs32(ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs32, "V2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_abs32s(ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs32s, "V2iV2i", "n")
+
+// ae_int64 __builtin_xtensa_ae_abs64(ae_int64 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs64, "V1LLiV1LLi", "n")
+
+// ae_int64 __builtin_xtensa_ae_abs64s(ae_int64 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_abs64s, "V1LLiV1LLi", "n")
+
+// ae_int16x4 __builtin_xtensa_ae_add16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add16, "V4sV4sV4s", "n")
+
+// ae_int16x4 __builtin_xtensa_ae_add16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add16s, "V4sV4sV4s", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_add24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add24s, "V2iV2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_add32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add32, "V2iV2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_add32_hl_lh(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add32_hl_lh, "V2iV2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_add32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add32s, "V2iV2iV2i", "n")
+
+// ae_int64 __builtin_xtensa_ae_add64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add64, "V1LLiV1LLiV1LLi", "n")
+
+// ae_int64 __builtin_xtensa_ae_add64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_add64s, "V1LLiV1LLiV1LLi", "n")
+
+// int __builtin_xtensa_ae_addbrba32(int art,int ars)
+BUILTIN(__builtin_xtensa_ae_addbrba32, "iii", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_addsub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_addsub32, "V2iV2iV2i", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_addsub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1)
+BUILTIN(__builtin_xtensa_ae_addsub32s, "V2iV2iV2i", "n")
+
+// ae_int64 __builtin_xtensa_ae_and(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1)
+BUILTIN(__builtin_xtensa_ae_and, "V1LLiV1LLiV1LLi", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_cvt32x2f16_10(ae_int16x4 ae_to_dr_v0)
+BUILTIN(__builtin_xtensa_ae_cvt32x2f16_10, "V2iV4s", "n")
+
+// ae_int32x2 __builtin_xtensa_ae_cvt32x2f16_32(ae_int16x4 ae_to_dr_v0)
+BUILTIN(__builtin_xtensa_ae_cvt32x2f16_32, "V2iV4s", "n")
+
+// ae_int64 __builtin_xtensa_ae_cvt48a32(int ars)
+BUILTIN(__builtin_xtensa_ae_cvt48a32, "V1LLii", "n")
+
+// ae_int64
__builtin_xtensa_ae_cvt64a32(int ars) +BUILTIN(__builtin_xtensa_ae_cvt64a32, "V1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_cvt64f32_h(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvt64f32_h, "V1LLiV2i", "n") + +// int __builtin_xtensa_ae_cvta32f24s_h(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_cvta32f24s_h, "iV2i", "n") + +// int __builtin_xtensa_ae_cvta32f24s_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_cvta32f24s_l, "iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56a32s(int ars) +BUILTIN(__builtin_xtensa_ae_cvtq56a32s, "V1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56p32s_h(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvtq56p32s_h, "V1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56p32s_l(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvtq56p32s_l, "V1LLiV2i", "n") + +// void __builtin_xtensa_ae_db(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db, "vsC**i", "n") + +// void __builtin_xtensa_ae_db_ic(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db_ic, "vsC**i", "n") + +// void __builtin_xtensa_ae_db_ip(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db_ip, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi_ic(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi_ic, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi_ip(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi_ip, "vsC**i", "n") + +// void __builtin_xtensa_ae_div64d32_h(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_div64d32_h, "vV1LLi*V2i", "n") + +// void __builtin_xtensa_ae_div64d32_l(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_div64d32_l, "vV1LLi*V2i", "n") + +// xtbool4 __builtin_xtensa_ae_eq16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_eq32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_eq64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq64, "V1bV1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16_i(const ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16_i, "V4sV1sC*i", "n") + +// void __builtin_xtensa_ae_l16_ip(ae_int16x4* ae_ls_v,const ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16_ip, "vV4s*V1sC**i", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16_x(const ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16_x, "V4sV1sC*i", "n") + +// void __builtin_xtensa_ae_l16_xc(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16_xc, "vV4s*V1sC**i", "n") + +// void __builtin_xtensa_ae_l16_xp(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16_xp, "vV4s*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16m_i(const ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16m_i, "V2iV1sC*i", "n") + +// void __builtin_xtensa_ae_l16m_iu(ae_int32x2* ae_ls_v,const ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16m_iu, "vV2i*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16m_x(const ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_x, "V2iV1sC*i", "n") + +// void __builtin_xtensa_ae_l16m_xc(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_xc, 
"vV2i*V1sC**i", "n") + +// void __builtin_xtensa_ae_l16m_xu(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_xu, "vV2i*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16x2m_i(const ae_int16x2* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l16x2m_i, "V2iV2sC*i", "n") + +// void __builtin_xtensa_ae_l16x2m_iu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l16x2m_iu, "vV2i*V2sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16x2m_x(const ae_int16x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_x, "V2iV2sC*i", "n") + +// void __builtin_xtensa_ae_l16x2m_xc(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_xc, "vV2i*V2sC**i", "n") + +// void __builtin_xtensa_ae_l16x2m_xu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_xu, "vV2i*V2sC**i", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16x4_i(const ae_int16x4* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l16x4_i, "V4sV4sC*i", "n") + +// void __builtin_xtensa_ae_l16x4_ip(ae_int16x4* ae_ls_v,const ae_int16x4** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l16x4_ip, "vV4s*V4sC**i", "n") + +// void __builtin_xtensa_ae_l16x4_ric(ae_int16x4* ae_ls_v,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_l16x4_ric, "vV4s*V4sC**", "n") + +// void __builtin_xtensa_ae_l16x4_rip(ae_int16x4* ae_ls_v,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_l16x4_rip, "vV4s*V4sC**", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16x4_x(const ae_int16x4* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_x, "V4sV4sC*i", "n") + +// void __builtin_xtensa_ae_l16x4_xc(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_xc, "vV4s*V4sC**i", "n") + +// void __builtin_xtensa_ae_l16x4_xp(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_xp, "vV4s*V4sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32_i, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32_ip, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_x, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_xc, "vV2i*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_xp, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32f24_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32f24_i, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32f24_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32f24_ip, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32f24_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32f24_x, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32f24_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32f24_xc, "vV2i*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32f24_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32f24_xp, "vV2i*V1iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l32m_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32m_i, 
"V1LLiV1iC*i", "n") + +// void __builtin_xtensa_ae_l32m_iu(ae_int64* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32m_iu, "vV1LLi*V1iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l32m_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_x, "V1LLiV1iC*i", "n") + +// void __builtin_xtensa_ae_l32m_xc(ae_int64* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_xc, "vV1LLi*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32m_xu(ae_int64* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_xu, "vV1LLi*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2_i(const ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l32x2_i, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l32x2_ip, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2_ric, "vV2i*V2iC**", "n") + +// void __builtin_xtensa_ae_l32x2_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2_rip, "vV2i*V2iC**", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2_x(const ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_x, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_xc, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_xp, "vV2i*V2iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2f24_i(const ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l32x2f24_i, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2f24_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l32x2f24_ip, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2f24_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2f24_ric, "vV2i*V2iC**", "n") + +// void __builtin_xtensa_ae_l32x2f24_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2f24_rip, "vV2i*V2iC**", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2f24_x(const ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_x, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2f24_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_xc, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2f24_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_xp, "vV2i*V2iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l64_i(const ae_int64* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l64_i, "V1LLiV1LLiC*i", "n") + +// void __builtin_xtensa_ae_l64_ip(ae_int64* ae_ls_v,const ae_int64** ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l64_ip, "vV1LLi*V1LLiC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l64_x(const ae_int64* ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_x, "V1LLiV1LLiC*i", "n") + +// void __builtin_xtensa_ae_l64_xc(ae_int64* ae_ls_v,const ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_xc, "vV1LLi*V1LLiC**i", "n") + +// void __builtin_xtensa_ae_l64_xp(ae_int64* ae_ls_v,const ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_xp, "vV1LLi*V1LLiC**i", "n") + +// void __builtin_xtensa_ae_la16x4_ic(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** 
ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ic, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4_ip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ip, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4_ric(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ric, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4_rip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_rip, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4neg_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4neg_pc, "vV8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4pos_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4pos_pc, "vV8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ic, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ric, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_rip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24neg_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24neg_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24pos_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24pos_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_ic, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_ip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_ric, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_rip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2neg_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2neg_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2pos_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2pos_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la32x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ic, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ric, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_rip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) 
+BUILTIN(__builtin_xtensa_ae_la32x2f24_ic, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_ip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_ric, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_rip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2neg_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2neg_pc, "vV8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2pos_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2pos_pc, "vV8Uc*V2iC**", "n") + +// ae_valign __builtin_xtensa_ae_la64_pp(const void* ars) +BUILTIN(__builtin_xtensa_ae_la64_pp, "V8UcvC*", "n") + +// ae_valign __builtin_xtensa_ae_lalign64_i(const ae_valign* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_lalign64_i, "V8UcV8UcC*i", "n") + +// int __builtin_xtensa_ae_lb(int art) +BUILTIN(__builtin_xtensa_ae_lb, "ii", "n") + +// int __builtin_xtensa_ae_lbi(immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbi, "ii", "n") + +// int __builtin_xtensa_ae_lbk(int ars,int art) +BUILTIN(__builtin_xtensa_ae_lbk, "iii", "n") + +// int __builtin_xtensa_ae_lbki(int ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbki, "iii", "n") + +// int __builtin_xtensa_ae_lbs(int art) +BUILTIN(__builtin_xtensa_ae_lbs, "ii", "n") + +// int __builtin_xtensa_ae_lbsi(immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbsi, "ii", "n") + +// xtbool4 __builtin_xtensa_ae_le16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_le32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_le64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le64, "V1bV1LLiV1LLi", "n") + +// xtbool4 __builtin_xtensa_ae_lt16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_lt32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_lt64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt64, "V1bV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_max32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_max32, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_max64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_max64, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_maxabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_maxabs32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_maxabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_maxabs64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_min32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_min32, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_min64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_min64, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_minabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) 
+BUILTIN(__builtin_xtensa_ae_minabs32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_minabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_minabs64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_mov(ae_int64 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_mov, "V1LLiV1LLi", "n") + +// int __builtin_xtensa_ae_movad16_0(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_0, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_1(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_1, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_2(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_2, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_3(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_3, "iV4s", "n") + +// int __builtin_xtensa_ae_movad32_h(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad32_h, "iV2i", "n") + +// int __builtin_xtensa_ae_movad32_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad32_l, "iV2i", "n") + +// ae_valign __builtin_xtensa_ae_movalign(ae_valign ae_uu_v) +BUILTIN(__builtin_xtensa_ae_movalign, "V8UcV8Uc", "n") + +// ae_int16x4 __builtin_xtensa_ae_movda16(int ars) +BUILTIN(__builtin_xtensa_ae_movda16, "V4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_movda16x2(int ars,int art) +BUILTIN(__builtin_xtensa_ae_movda16x2, "V4sii", "n") + +// ae_int32 __builtin_xtensa_ae_movda32(int ars) +BUILTIN(__builtin_xtensa_ae_movda32, "V1ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_movda32x2(int ars,int art) +BUILTIN(__builtin_xtensa_ae_movda32x2, "V2iii", "n") + +// void __builtin_xtensa_ae_movf16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) +BUILTIN(__builtin_xtensa_ae_movf16x4, "vV4s*V4sV4b", "n") + +// void __builtin_xtensa_ae_movf32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) +BUILTIN(__builtin_xtensa_ae_movf32x2, "vV2i*V2iV2b", "n") + +// void __builtin_xtensa_ae_movf64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) +BUILTIN(__builtin_xtensa_ae_movf64, "vV1LLi*V1LLiV1b", "n") + +// ae_int32x2 __builtin_xtensa_ae_movi(immediate movi_imm) +BUILTIN(__builtin_xtensa_ae_movi, "V2ii", "n") + +// void __builtin_xtensa_ae_movt16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) +BUILTIN(__builtin_xtensa_ae_movt16x4, "vV4s*V4sV4b", "n") + +// void __builtin_xtensa_ae_movt32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) +BUILTIN(__builtin_xtensa_ae_movt32x2, "vV2i*V2iV2b", "n") + +// void __builtin_xtensa_ae_movt64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) +BUILTIN(__builtin_xtensa_ae_movt64, "vV1LLi*V1LLiV1b", "n") + +// void __builtin_xtensa_ae_mul16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mul16x4, "vV2i*V2i*V4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 
__builtin_xtensa_ae_mul32u_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32u_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l3_s2, "V1LLiV2iV4s", "n") + +// void __builtin_xtensa_ae_mula16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mula16x4, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mula32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_lh, "vV1LLi*V2iV2i", "n") + +// void 
__builtin_xtensa_ae_mula32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32u_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h0_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h0_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h2_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h2_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_11_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_11_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_13_02, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_13_02_s2, "vV2i*V4sV4s", 
"n") + +// void __builtin_xtensa_ae_mulaafd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_33_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_33_22_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h0_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h0_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h2_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h2_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulac24(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulac24, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulac32x16_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulac32x16_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulac32x16_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulac32x16_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_10, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_11, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_20, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_21, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_30, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_31, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_32, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_33, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulaf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_ll, "vV1LLi*V2iV2i", "n") + +// void 
__builtin_xtensa_ae_mulaf32s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 
opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16s_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16u_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulafc24ra(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc24ra, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafc32x16ras_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc32x16ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafc32x16ras_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc32x16ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd24x2_fir_h, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulafd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd24x2_fir_l, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_hh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_hl, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_lh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_ll, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2r, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2r_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2ra, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulafp24x2ra_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x2ras, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x2rs, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulafq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulap24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulap24x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulap24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulap24x2_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulap32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulap32x16x2_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulap32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulap32x16x2_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulap32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulap32x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaq32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaq32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mularfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mularfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mularfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mularfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_hh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h3_l2_s2(ae_int64* 
ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc24(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulc24, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc32x16_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulc32x16_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc32x16_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulc32x16_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_10(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_10, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_11(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_11, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_20(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_20, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_21(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_21, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_22(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_22, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_30(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_30, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_31(ae_int16x4 
ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_31, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_32(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_32, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_33(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_33, "V2iV4sV4s", "n") + +// void __builtin_xtensa_ae_mulf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulf32x16_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16s_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16s_l, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16u_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16u_l, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16u_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfc24ra(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc24ra, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfc32x16ras_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc32x16ras_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfc32x16ras_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc32x16ras_l, "V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd24x2_fir_h, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulfd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd24x2_fir_l, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_hh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_hl, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_lh(ae_int64* 
ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_lh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_ll, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_mulfp16x4ras(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulfp16x4ras, "V4sV4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_mulfp16x4s(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulfp16x4s, "V4sV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2r(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2r, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2r_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2r_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2ra(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2ra, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2ra_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2ra_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_h_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_l_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_h_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_l_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x2ras(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x2ras, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x2rs(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x2rs, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfq32sp24s_h_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulfq32sp24s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp24x2(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulp24x2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp24x2_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulp24x2_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x16x2_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x16x2_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x16x2_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x16x2_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x2, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulq32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulq32sp16s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulq32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulq32sp16u_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulrfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulrfq32sp24s_h_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulrfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulrfq32sp24s_l_s2, "V1LLiV1LLiV2i", "n") + +// void __builtin_xtensa_ae_muls16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_muls16x4, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_muls32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_ll, "vV1LLi*V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_hh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_hh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_ll_s2, "V1LLiV2iV2i", "n") + +// void 
__builtin_xtensa_ae_muls32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32u_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) 
+BUILTIN(__builtin_xtensa_ae_mulsad24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h1_l0(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h3_l2(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_10, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_11, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_20, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_21, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_22, "vV2i*V4sV4s", "n") + +// void 
__builtin_xtensa_ae_mulsf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_30, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_31, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_32, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_33, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulsf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h2_s2, 
"vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16s_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16u_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2r, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2r_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2ra, "vV2i*V2iV2i", "n") + +// void 
__builtin_xtensa_ae_mulsfp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2ra_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x2ras, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x2rs, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsp24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsp24x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsp24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsp24x2_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsp32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x16x2_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsp32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x16x2_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsp32x2(ae_int32x2* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsq32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsq32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsrfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsrfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsrfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsrfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_hh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulssd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_11_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_11_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_13_02, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_13_02_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_33_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_33_22_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 
ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h0_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h2_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_11_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_11_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_13_02, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_13_02_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_33_22, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_33_22_s2, "V2iV4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 
__builtin_xtensa_ae_mulzaafd32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h0_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h2_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulzasfd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h1_l0(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h3_l2(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulzssd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_11_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_11_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_13_02, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_13_02_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_33_22, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_33_22_s2, "V2iV4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_nand(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_nand, "V1LLiV1LLiV1LLi", "n") + +// ae_int16x4 
__builtin_xtensa_ae_neg16s(ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg16s, "V4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_neg24s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg24s, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_neg32(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg32, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_neg32s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg32s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_neg64(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg64, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_neg64s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg64s, "V1LLiV1LLi", "n") + +// int __builtin_xtensa_ae_nsa64(ae_int64 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsa64, "iV1LLi", "n") + +// int __builtin_xtensa_ae_nsaz16_0(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsaz16_0, "iV4s", "n") + +// int __builtin_xtensa_ae_nsaz32_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsaz32_l, "iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_or(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_or, "V1LLiV1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_pksr24(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) +BUILTIN(__builtin_xtensa_ae_pksr24, "vV2i*V1LLii", "n") + +// void __builtin_xtensa_ae_pksr32(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) +BUILTIN(__builtin_xtensa_ae_pksr32, "vV2i*V1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_round16x4f32sasym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_round16x4f32sasym, "V4sV2iV2i", "n") + +// ae_int16x4 __builtin_xtensa_ae_round16x4f32ssym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_round16x4f32ssym, "V4sV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_round24x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round24x2f48sasym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round24x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round24x2f48ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f48sasym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f48ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f64sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f64sasym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f64ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f64ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16f24asym(ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_roundsp16f24asym, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16f24sym(ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_roundsp16f24sym, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16q48x2asym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsp16q48x2asym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16q48x2sym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsp16q48x2sym, "V2iV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_roundsq32f48asym(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsq32f48asym, "V1LLiV1LLi", 
"n") + +// ae_int64 __builtin_xtensa_ae_roundsq32f48sym(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsq32f48sym, "V1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_s16_0_i(ae_int16x4 ae_ls_v,ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16_0_i, "vV4sV1s*i", "n") + +// void __builtin_xtensa_ae_s16_0_ip(ae_int16x4 ae_ls_v,ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16_0_ip, "vV4sV1s**i", "n") + +// void __builtin_xtensa_ae_s16_0_x(ae_int16x4 ae_ls_v,ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_x, "vV4sV1s*i", "n") + +// void __builtin_xtensa_ae_s16_0_xc(ae_int16x4 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_xc, "vV4sV1s**i", "n") + +// void __builtin_xtensa_ae_s16_0_xp(ae_int16x4 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_xp, "vV4sV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_i(ae_int32x2 ae_ls_v,ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16m_l_i, "vV2iV1s*i", "n") + +// void __builtin_xtensa_ae_s16m_l_iu(ae_int32x2 ae_ls_v,ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16m_l_iu, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_x(ae_int32x2 ae_ls_v,ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_x, "vV2iV1s*i", "n") + +// void __builtin_xtensa_ae_s16m_l_xc(ae_int32x2 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_xc, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_xu(ae_int32x2 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_xu, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_i(ae_int32x2 ae_ls_v,ae_int16x2* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s16x2m_i, "vV2iV2s*i", "n") + +// void __builtin_xtensa_ae_s16x2m_iu(ae_int32x2 ae_ls_v,ae_int16x2** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s16x2m_iu, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_x(ae_int32x2 ae_ls_v,ae_int16x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_x, "vV2iV2s*i", "n") + +// void __builtin_xtensa_ae_s16x2m_xc(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_xc, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_xu(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_xu, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x4_i(ae_int16x4 ae_ls_v,ae_int16x4* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s16x4_i, "vV4sV4s*i", "n") + +// void __builtin_xtensa_ae_s16x4_ip(ae_int16x4 ae_ls_v,ae_int16x4** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s16x4_ip, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s16x4_ric(ae_int16x4 ae_ls_v,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_s16x4_ric, "vV4sV4s**", "n") + +// void __builtin_xtensa_ae_s16x4_rip(ae_int16x4 ae_ls_v,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_s16x4_rip, "vV4sV4s**", "n") + +// void __builtin_xtensa_ae_s16x4_x(ae_int16x4 ae_ls_v,ae_int16x4* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_x, "vV4sV4s*i", "n") + +// void __builtin_xtensa_ae_s16x4_xc(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_xc, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s16x4_xp(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_xp, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s24ra64s_i, "vV1LLiV1i*i", "n") + +// void 
__builtin_xtensa_ae_s24ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s24ra64s_ip, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s24ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_xc, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_xp, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s24x2ra64s_ip, "vV1LLiV1LLiV2i**", "n") + +// void __builtin_xtensa_ae_s32_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32_l_i, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32_l_ip, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_x, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_xc, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_xp, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32f24_l_i, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32f24_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32f24_l_ip, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_x, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32f24_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_xc, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_xp, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_i(ae_int64 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32m_i, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32m_iu(ae_int64 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32m_iu, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_x(ae_int64 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32m_xc(ae_int64 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_xc, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_xu(ae_int64 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_xu, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32ra64s_i, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32ra64s_ip, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_xc, "vV1LLiV1i**i", 
"n") + +// void __builtin_xtensa_ae_s32ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_xp, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32x2_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s32x2_i, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s32x2_ip, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2_ric, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2_rip, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_x, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_xc, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_xp, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s32x2f24_i, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2f24_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s32x2f24_ip, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2f24_ric, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2f24_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2f24_rip, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2f24_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_x, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2f24_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_xc, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_xp, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2ra64s_ip, "vV1LLiV1LLiV2i**", "n") + +// void __builtin_xtensa_ae_s64_i(ae_int64 ae_ls_v,ae_int64* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s64_i, "vV1LLiV1LLi*i", "n") + +// void __builtin_xtensa_ae_s64_ip(ae_int64 ae_ls_v,ae_int64** ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s64_ip, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_s64_x(ae_int64 ae_ls_v,ae_int64* ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_x, "vV1LLiV1LLi*i", "n") + +// void __builtin_xtensa_ae_s64_xc(ae_int64 ae_ls_v,ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_xc, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_s64_xp(ae_int64 ae_ls_v,ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_xp, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_sa16x4_ic(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ic, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_ip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ip, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_ric(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ric, 
"vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_rip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_rip, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa24_l_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ic, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ric, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_rip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ic, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ric, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_rip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa32x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ic, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ric, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_rip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ic, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ric, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_rip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa64neg_fp(ae_valign* ae_ls_su,void* ars) +BUILTIN(__builtin_xtensa_ae_sa64neg_fp, "vV8Uc*v*", "n") + +// void __builtin_xtensa_ae_sa64pos_fp(ae_valign* ae_ls_su,void* ars) +BUILTIN(__builtin_xtensa_ae_sa64pos_fp, "vV8Uc*v*", "n") + +// void __builtin_xtensa_ae_salign64_i(ae_valign ae_ls_su,ae_valign* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_salign64_i, "vV8UcV8Uc*i", "n") + +// ae_int16x4 __builtin_xtensa_ae_sat16x4(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat16x4, "V4sV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sat24s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat24s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sat48s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat48s, "V1LLiV1LLi", "n") + +// 
ae_int64 __builtin_xtensa_ae_satq56s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_satq56s, "V1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_sb(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb, "vs**i", "n") + +// void __builtin_xtensa_ae_sb_ic(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb_ic, "vs**i", "n") + +// void __builtin_xtensa_ae_sb_ip(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb_ip, "vs**i", "n") + +// void __builtin_xtensa_ae_sbf(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf, "vs**", "n") + +// void __builtin_xtensa_ae_sbf_ic(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf_ic, "vs**", "n") + +// void __builtin_xtensa_ae_sbf_ip(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf_ip, "vs**", "n") + +// void __builtin_xtensa_ae_sbi(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi, "vs**ii", "n") + +// void __builtin_xtensa_ae_sbi_ic(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi_ic, "vs**ii", "n") + +// void __builtin_xtensa_ae_sbi_ip(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi_ip, "vs**ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_sel16i(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm) +BUILTIN(__builtin_xtensa_ae_sel16i, "V4sV4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_sel16i_n(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm_N) +BUILTIN(__builtin_xtensa_ae_sel16i_n, "V4sV4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32(ae_int32x2 ae_dr_to_dr_v0,immediate ae_opnd_tp7) +BUILTIN(__builtin_xtensa_ae_sext32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32x2d16_10(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_sext32x2d16_10, "V2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32x2d16_32(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_sext32x2d16_32, "V2iV4s", "n") + +// int __builtin_xtensa_ae_sha32(int ars) +BUILTIN(__builtin_xtensa_ae_sha32, "ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_shortswap(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_shortswap, "V4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_slaa16s(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_slaa32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slaa32s(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa32s, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_slaa64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa64, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaa64s(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa64s, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaaq56(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaaq56, "V1LLiV1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_slai16s(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_slai16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai24s(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai24s, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai32s(ae_int32x2 ae_shift_d0,immediate ae_osa32) 
+BUILTIN(__builtin_xtensa_ae_slai32s, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_slai64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slai64, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slai64s(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slai64s, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaisq56s(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slaisq56s, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas24s(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas24s, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas32, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas32s(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas32s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_slas64(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas64, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slas64s(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas64s, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slasq56(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slasq56, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slassq56s(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slassq56s, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_sra64_32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sra64_32, "V1LLiV2ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_sraa16rs(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa16rs, "V4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_sraa16s(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32rs(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32rs, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32s(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32s, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_sraa64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa64, "V1LLiV1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_srai16(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_srai16, "V4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_srai16r(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_srai16r, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_srai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srai32r(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai32r, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srai64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_srai64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sras24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_sras24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sras32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_sras32, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sras64(ae_int64 ae_shift_d0) 
+BUILTIN(__builtin_xtensa_ae_sras64, "V1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_srla32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_srla32, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srla64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_srla64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srli24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srli24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srli32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srli32, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srli64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_srli64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srls24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_srls32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls32, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_srls64(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls64, "V1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_sub16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub16, "V4sV4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_sub16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub16s, "V4sV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_sub24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub24s, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sub64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub64, "V1LLiV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_sub64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_subadd32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_subadd32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_subadd32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_subadd32s, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunca32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,int ars) +BUILTIN(__builtin_xtensa_ae_trunca32f64s_l, "V2iV2iV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunca32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,int ars) +BUILTIN(__builtin_xtensa_ae_trunca32x2f64s, "V2iV1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunci32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_trunci32f64s_l, "V2iV2iV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunci32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_trunci32x2f64s, "V2iV1LLiV1LLii", "n") + +// void __builtin_xtensa_ae_vldl16c(const short** ars) +BUILTIN(__builtin_xtensa_ae_vldl16c, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16c_ic(const short** ars) +BUILTIN(__builtin_xtensa_ae_vldl16c_ic, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16c_ip(const short** ars) +BUILTIN(__builtin_xtensa_ae_vldl16c_ip, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16t(xtbool* br,int* art,const short* ars) 
+BUILTIN(__builtin_xtensa_ae_vldl16t, "vV1b*i*sC*", "n") + +// void __builtin_xtensa_ae_vldl32t(xtbool* br,int* art,const int* ars) +BUILTIN(__builtin_xtensa_ae_vldl32t, "vV1b*i*iC*", "n") + +// void __builtin_xtensa_ae_vldsht(int art) +BUILTIN(__builtin_xtensa_ae_vldsht, "vi", "n") + +// void __builtin_xtensa_ae_vlel16t(xtbool* br,int* art,const short* ars) +BUILTIN(__builtin_xtensa_ae_vlel16t, "vV1b*i*sC*", "n") + +// void __builtin_xtensa_ae_vlel32t(xtbool* br,int* art,const int* ars) +BUILTIN(__builtin_xtensa_ae_vlel32t, "vV1b*i*iC*", "n") + +// void __builtin_xtensa_ae_vles16c(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c, "vs**", "n") + +// void __builtin_xtensa_ae_vles16c_ic(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c_ic, "vs**", "n") + +// void __builtin_xtensa_ae_vles16c_ip(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c_ip, "vs**", "n") + +// ae_int64 __builtin_xtensa_ae_xor(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_xor, "V1LLiV1LLiV1LLi", "n") + +// ae_valign __builtin_xtensa_ae_zalign64() +BUILTIN(__builtin_xtensa_ae_zalign64, "V8Uc", "n") + +// int __builtin_xtensa_rur_ae_bithead() +BUILTIN(__builtin_xtensa_rur_ae_bithead, "i", "n") + +// int __builtin_xtensa_rur_ae_bitptr() +BUILTIN(__builtin_xtensa_rur_ae_bitptr, "i", "n") + +// int __builtin_xtensa_rur_ae_bitsused() +BUILTIN(__builtin_xtensa_rur_ae_bitsused, "i", "n") + +// int __builtin_xtensa_rur_ae_cbegin0() +BUILTIN(__builtin_xtensa_rur_ae_cbegin0, "i", "n") + +// int __builtin_xtensa_rur_ae_cend0() +BUILTIN(__builtin_xtensa_rur_ae_cend0, "i", "n") + +// int __builtin_xtensa_rur_ae_cw_sd_no() +BUILTIN(__builtin_xtensa_rur_ae_cw_sd_no, "i", "n") + +// int __builtin_xtensa_rur_ae_cwrap() +BUILTIN(__builtin_xtensa_rur_ae_cwrap, "i", "n") + +// int __builtin_xtensa_rur_ae_first_ts() +BUILTIN(__builtin_xtensa_rur_ae_first_ts, "i", "n") + +// int __builtin_xtensa_rur_ae_nextoffset() +BUILTIN(__builtin_xtensa_rur_ae_nextoffset, "i", "n") + +// int __builtin_xtensa_rur_ae_overflow() +BUILTIN(__builtin_xtensa_rur_ae_overflow, "i", "n") + +// int __builtin_xtensa_rur_ae_ovf_sar() +BUILTIN(__builtin_xtensa_rur_ae_ovf_sar, "i", "n") + +// int __builtin_xtensa_rur_ae_sar() +BUILTIN(__builtin_xtensa_rur_ae_sar, "i", "n") + +// int __builtin_xtensa_rur_ae_searchdone() +BUILTIN(__builtin_xtensa_rur_ae_searchdone, "i", "n") + +// int __builtin_xtensa_rur_ae_tablesize() +BUILTIN(__builtin_xtensa_rur_ae_tablesize, "i", "n") + +// int __builtin_xtensa_rur_ae_ts_fts_bu_bp() +BUILTIN(__builtin_xtensa_rur_ae_ts_fts_bu_bp, "i", "n") + +// void __builtin_xtensa_wur_ae_bithead(int art) +BUILTIN(__builtin_xtensa_wur_ae_bithead, "vi", "n") + +// void __builtin_xtensa_wur_ae_bitptr(int art) +BUILTIN(__builtin_xtensa_wur_ae_bitptr, "vi", "n") + +// void __builtin_xtensa_wur_ae_bitsused(int art) +BUILTIN(__builtin_xtensa_wur_ae_bitsused, "vi", "n") + +// void __builtin_xtensa_wur_ae_cbegin0(int art) +BUILTIN(__builtin_xtensa_wur_ae_cbegin0, "vi", "n") + +// void __builtin_xtensa_wur_ae_cend0(int art) +BUILTIN(__builtin_xtensa_wur_ae_cend0, "vi", "n") + +// void __builtin_xtensa_wur_ae_cw_sd_no(int art) +BUILTIN(__builtin_xtensa_wur_ae_cw_sd_no, "vi", "n") + +// void __builtin_xtensa_wur_ae_cwrap(int art) +BUILTIN(__builtin_xtensa_wur_ae_cwrap, "vi", "n") + +// void __builtin_xtensa_wur_ae_first_ts(int art) +BUILTIN(__builtin_xtensa_wur_ae_first_ts, "vi", "n") + +// void __builtin_xtensa_wur_ae_nextoffset(int art) +BUILTIN(__builtin_xtensa_wur_ae_nextoffset, "vi", "n") + +// void 
__builtin_xtensa_wur_ae_overflow(int art) +BUILTIN(__builtin_xtensa_wur_ae_overflow, "vi", "n") + +// void __builtin_xtensa_wur_ae_ovf_sar(int art) +BUILTIN(__builtin_xtensa_wur_ae_ovf_sar, "vi", "n") + +// void __builtin_xtensa_wur_ae_sar(int art) +BUILTIN(__builtin_xtensa_wur_ae_sar, "vi", "n") + +// void __builtin_xtensa_wur_ae_searchdone(int art) +BUILTIN(__builtin_xtensa_wur_ae_searchdone, "vi", "n") + +// void __builtin_xtensa_wur_ae_tablesize(int art) +BUILTIN(__builtin_xtensa_wur_ae_tablesize, "vi", "n") + +// void __builtin_xtensa_wur_ae_ts_fts_bu_bp(int art) +BUILTIN(__builtin_xtensa_wur_ae_ts_fts_bu_bp, "vi", "n") + +// Type conversion builtins +BUILTIN(__builtin_xtensa_ae_int32x2, "V2i.", "nct") +BUILTIN(__builtin_xtensa_ae_int32, "V1i.", "nct") diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h index af4b317a9cf84..3f76c3a4d7ebc 100644 --- a/clang/include/clang/Basic/TargetBuiltins.h +++ b/clang/include/clang/Basic/TargetBuiltins.h @@ -366,12 +366,14 @@ namespace clang { /// Xtensa builtins namespace Xtensa { - enum { - LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, + enum { + LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, #define BUILTIN(ID, TYPE, ATTRS) BI##ID, #include "clang/Basic/BuiltinsXtensa.def" - LastTSBuiltin - }; +#include "clang/Basic/BuiltinsXtensaHIFI.def" +#undef BUILTIN + LastTSBuiltin + }; } // namespace Xtensa static constexpr uint64_t LargestBuiltinID = std::max( diff --git a/clang/include/clang/Basic/XtensaBuiltins.inc b/clang/include/clang/Basic/XtensaBuiltins.inc new file mode 100644 index 0000000000000..1231d992c36c4 --- /dev/null +++ b/clang/include/clang/Basic/XtensaBuiltins.inc @@ -0,0 +1,1743 @@ +//===-- XtensaBuiltins.inc - Clang intrinsic database for Xtensa arch ----*- C++ +//-*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +case Xtensa::BI__builtin_xtensa_ae_abs16s: +return {Intrinsic::xtensa_ae_abs16s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs24s: +return {Intrinsic::xtensa_ae_abs24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs32: +return {Intrinsic::xtensa_ae_abs32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs32s: +return {Intrinsic::xtensa_ae_abs32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs64: +return {Intrinsic::xtensa_ae_abs64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs64s: +return {Intrinsic::xtensa_ae_abs64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_add16: +return {Intrinsic::xtensa_ae_add16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add16s: +return {Intrinsic::xtensa_ae_add16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add24s: +return {Intrinsic::xtensa_ae_add24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32: +return {Intrinsic::xtensa_ae_add32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32_hl_lh: +return {Intrinsic::xtensa_ae_add32_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32s: +return {Intrinsic::xtensa_ae_add32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add64: +return {Intrinsic::xtensa_ae_add64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add64s: +return {Intrinsic::xtensa_ae_add64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addbrba32: +return {Intrinsic::xtensa_ae_addbrba32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addsub32: +return {Intrinsic::xtensa_ae_addsub32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addsub32s: +return {Intrinsic::xtensa_ae_addsub32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_and: +return {Intrinsic::xtensa_ae_and, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_cvt32x2f16_10: +return {Intrinsic::xtensa_ae_cvt32x2f16_10, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt32x2f16_32: +return {Intrinsic::xtensa_ae_cvt32x2f16_32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt48a32: +return {Intrinsic::xtensa_ae_cvt48a32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt64a32: +return {Intrinsic::xtensa_ae_cvt64a32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt64f32_h: +return {Intrinsic::xtensa_ae_cvt64f32_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvta32f24s_h: +return {Intrinsic::xtensa_ae_cvta32f24s_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvta32f24s_l: +return {Intrinsic::xtensa_ae_cvta32f24s_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56a32s: +return {Intrinsic::xtensa_ae_cvtq56a32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56p32s_h: +return {Intrinsic::xtensa_ae_cvtq56p32s_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56p32s_l: +return {Intrinsic::xtensa_ae_cvtq56p32s_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_db: +return {Intrinsic::xtensa_ae_db, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_db_ic: +return {Intrinsic::xtensa_ae_db_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_db_ip: +return {Intrinsic::xtensa_ae_db_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi: +return {Intrinsic::xtensa_ae_dbi, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi_ic: +return {Intrinsic::xtensa_ae_dbi_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi_ip: +return {Intrinsic::xtensa_ae_dbi_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_div64d32_h: +return 
{Intrinsic::xtensa_ae_div64d32_h, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_div64d32_l: +return {Intrinsic::xtensa_ae_div64d32_l, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_eq16: +return {Intrinsic::xtensa_ae_eq16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_eq32: +return {Intrinsic::xtensa_ae_eq32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_eq64: +return {Intrinsic::xtensa_ae_eq64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16_i: +return {Intrinsic::xtensa_ae_l16_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16_ip: +return {Intrinsic::xtensa_ae_l16_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16_x: +return {Intrinsic::xtensa_ae_l16_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16_xc: +return {Intrinsic::xtensa_ae_l16_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16_xp: +return {Intrinsic::xtensa_ae_l16_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_i: +return {Intrinsic::xtensa_ae_l16m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16m_iu: +return {Intrinsic::xtensa_ae_l16m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_x: +return {Intrinsic::xtensa_ae_l16m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16m_xc: +return {Intrinsic::xtensa_ae_l16m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_xu: +return {Intrinsic::xtensa_ae_l16m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_i: +return {Intrinsic::xtensa_ae_l16x2m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_iu: +return {Intrinsic::xtensa_ae_l16x2m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_x: +return {Intrinsic::xtensa_ae_l16x2m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_xc: +return {Intrinsic::xtensa_ae_l16x2m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_xu: +return {Intrinsic::xtensa_ae_l16x2m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_i: +return {Intrinsic::xtensa_ae_l16x4_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_ip: +return {Intrinsic::xtensa_ae_l16x4_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_ric: +return {Intrinsic::xtensa_ae_l16x4_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_rip: +return {Intrinsic::xtensa_ae_l16x4_rip, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_x: +return {Intrinsic::xtensa_ae_l16x4_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_xc: +return {Intrinsic::xtensa_ae_l16x4_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_xp: +return {Intrinsic::xtensa_ae_l16x4_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_i: +return {Intrinsic::xtensa_ae_l32_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32_ip: +return {Intrinsic::xtensa_ae_l32_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_x: +return {Intrinsic::xtensa_ae_l32_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32_xc: +return {Intrinsic::xtensa_ae_l32_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_xp: +return {Intrinsic::xtensa_ae_l32_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_i: +return {Intrinsic::xtensa_ae_l32f24_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_ip: +return {Intrinsic::xtensa_ae_l32f24_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_x: +return {Intrinsic::xtensa_ae_l32f24_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_xc: +return {Intrinsic::xtensa_ae_l32f24_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_xp: +return {Intrinsic::xtensa_ae_l32f24_xp, 0, 0x40201}; +case 
Xtensa::BI__builtin_xtensa_ae_l32m_i: +return {Intrinsic::xtensa_ae_l32m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32m_iu: +return {Intrinsic::xtensa_ae_l32m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32m_x: +return {Intrinsic::xtensa_ae_l32m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32m_xc: +return {Intrinsic::xtensa_ae_l32m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32m_xu: +return {Intrinsic::xtensa_ae_l32m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_i: +return {Intrinsic::xtensa_ae_l32x2_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_ip: +return {Intrinsic::xtensa_ae_l32x2_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_ric: +return {Intrinsic::xtensa_ae_l32x2_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_rip: +return {Intrinsic::xtensa_ae_l32x2_rip, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_x: +return {Intrinsic::xtensa_ae_l32x2_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_xc: +return {Intrinsic::xtensa_ae_l32x2_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_xp: +return {Intrinsic::xtensa_ae_l32x2_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_i: +return {Intrinsic::xtensa_ae_l32x2f24_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ip: +return {Intrinsic::xtensa_ae_l32x2f24_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ric: +return {Intrinsic::xtensa_ae_l32x2f24_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_rip: +return {Intrinsic::xtensa_ae_l32x2f24_rip, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_x: +return {Intrinsic::xtensa_ae_l32x2f24_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_xc: +return {Intrinsic::xtensa_ae_l32x2f24_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_xp: +return {Intrinsic::xtensa_ae_l32x2f24_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_i: +return {Intrinsic::xtensa_ae_l64_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l64_ip: +return {Intrinsic::xtensa_ae_l64_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_x: +return {Intrinsic::xtensa_ae_l64_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l64_xc: +return {Intrinsic::xtensa_ae_l64_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_xp: +return {Intrinsic::xtensa_ae_l64_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ic: +return {Intrinsic::xtensa_ae_la16x4_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ip: +return {Intrinsic::xtensa_ae_la16x4_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ric: +return {Intrinsic::xtensa_ae_la16x4_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_rip: +return {Intrinsic::xtensa_ae_la16x4_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4neg_pc: +return {Intrinsic::xtensa_ae_la16x4neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la16x4pos_pc: +return {Intrinsic::xtensa_ae_la16x4pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24_ic: +return {Intrinsic::xtensa_ae_la24_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_ip: +return {Intrinsic::xtensa_ae_la24_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_ric: +return {Intrinsic::xtensa_ae_la24_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_rip: +return {Intrinsic::xtensa_ae_la24_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24neg_pc: +return {Intrinsic::xtensa_ae_la24neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24pos_pc: +return {Intrinsic::xtensa_ae_la24pos_pc, 0, 
0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ic: +return {Intrinsic::xtensa_ae_la24x2_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ip: +return {Intrinsic::xtensa_ae_la24x2_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ric: +return {Intrinsic::xtensa_ae_la24x2_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_rip: +return {Intrinsic::xtensa_ae_la24x2_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2neg_pc: +return {Intrinsic::xtensa_ae_la24x2neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24x2pos_pc: +return {Intrinsic::xtensa_ae_la24x2pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ic: +return {Intrinsic::xtensa_ae_la32x2_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ip: +return {Intrinsic::xtensa_ae_la32x2_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ric: +return {Intrinsic::xtensa_ae_la32x2_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_rip: +return {Intrinsic::xtensa_ae_la32x2_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ic: +return {Intrinsic::xtensa_ae_la32x2f24_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ip: +return {Intrinsic::xtensa_ae_la32x2f24_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ric: +return {Intrinsic::xtensa_ae_la32x2f24_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_rip: +return {Intrinsic::xtensa_ae_la32x2f24_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2neg_pc: +return {Intrinsic::xtensa_ae_la32x2neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la32x2pos_pc: +return {Intrinsic::xtensa_ae_la32x2pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la64_pp: +return {Intrinsic::xtensa_ae_la64_pp, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lalign64_i: +return {Intrinsic::xtensa_ae_lalign64_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lb: +return {Intrinsic::xtensa_ae_lb, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbi: +return {Intrinsic::xtensa_ae_lbi, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbk: +return {Intrinsic::xtensa_ae_lbk, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lbki: +return {Intrinsic::xtensa_ae_lbki, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lbs: +return {Intrinsic::xtensa_ae_lbs, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbsi: +return {Intrinsic::xtensa_ae_lbsi, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_le16: +return {Intrinsic::xtensa_ae_le16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_le32: +return {Intrinsic::xtensa_ae_le32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_le64: +return {Intrinsic::xtensa_ae_le64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt16: +return {Intrinsic::xtensa_ae_lt16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt32: +return {Intrinsic::xtensa_ae_lt32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt64: +return {Intrinsic::xtensa_ae_lt64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_max32: +return {Intrinsic::xtensa_ae_max32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_max64: +return {Intrinsic::xtensa_ae_max64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_maxabs32s: +return {Intrinsic::xtensa_ae_maxabs32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_maxabs64s: +return {Intrinsic::xtensa_ae_maxabs64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_min32: +return {Intrinsic::xtensa_ae_min32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_min64: +return {Intrinsic::xtensa_ae_min64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_minabs32s: 
+return {Intrinsic::xtensa_ae_minabs32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_minabs64s: +return {Intrinsic::xtensa_ae_minabs64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mov: +return {Intrinsic::xtensa_ae_mov, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_0: +return {Intrinsic::xtensa_ae_movad16_0, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_1: +return {Intrinsic::xtensa_ae_movad16_1, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_2: +return {Intrinsic::xtensa_ae_movad16_2, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_3: +return {Intrinsic::xtensa_ae_movad16_3, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad32_h: +return {Intrinsic::xtensa_ae_movad32_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad32_l: +return {Intrinsic::xtensa_ae_movad32_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movalign: +return {Intrinsic::xtensa_ae_movalign, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda16: +return {Intrinsic::xtensa_ae_movda16, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda16x2: +return {Intrinsic::xtensa_ae_movda16x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_movda32: +return {Intrinsic::xtensa_ae_movda32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda32x2: +return {Intrinsic::xtensa_ae_movda32x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_movf16x4: +return {Intrinsic::xtensa_ae_movf16x4, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movf32x2: +return {Intrinsic::xtensa_ae_movf32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movf64: +return {Intrinsic::xtensa_ae_movf64, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movi: +return {Intrinsic::xtensa_ae_movi, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movt16x4: +return {Intrinsic::xtensa_ae_movt16x4, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movt32x2: +return {Intrinsic::xtensa_ae_movt32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movt64: +return {Intrinsic::xtensa_ae_movt64, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mul16x4: +return {Intrinsic::xtensa_ae_mul16x4, 0, 0xc0003}; +case Xtensa::BI__builtin_xtensa_ae_mul32_hh: +return {Intrinsic::xtensa_ae_mul32_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_lh: +return {Intrinsic::xtensa_ae_mul32_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_ll: +return {Intrinsic::xtensa_ae_mul32_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_ll_s2: +return {Intrinsic::xtensa_ae_mul32_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32u_ll: +return {Intrinsic::xtensa_ae_mul32u_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h0: +return {Intrinsic::xtensa_ae_mul32x16_h0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h0_s2: +return {Intrinsic::xtensa_ae_mul32x16_h0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h1: +return {Intrinsic::xtensa_ae_mul32x16_h1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h1_s2: +return {Intrinsic::xtensa_ae_mul32x16_h1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h2: +return {Intrinsic::xtensa_ae_mul32x16_h2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h2_s2: +return {Intrinsic::xtensa_ae_mul32x16_h2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h3: +return {Intrinsic::xtensa_ae_mul32x16_h3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h3_s2: +return {Intrinsic::xtensa_ae_mul32x16_h3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l0: +return 
{Intrinsic::xtensa_ae_mul32x16_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l0_s2: +return {Intrinsic::xtensa_ae_mul32x16_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l1: +return {Intrinsic::xtensa_ae_mul32x16_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l1_s2: +return {Intrinsic::xtensa_ae_mul32x16_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l2: +return {Intrinsic::xtensa_ae_mul32x16_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l2_s2: +return {Intrinsic::xtensa_ae_mul32x16_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l3: +return {Intrinsic::xtensa_ae_mul32x16_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l3_s2: +return {Intrinsic::xtensa_ae_mul32x16_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mula16x4: +return {Intrinsic::xtensa_ae_mula16x4, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mula32_hh: +return {Intrinsic::xtensa_ae_mula32_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_lh: +return {Intrinsic::xtensa_ae_mula32_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_ll: +return {Intrinsic::xtensa_ae_mula32_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_ll_s2: +return {Intrinsic::xtensa_ae_mula32_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32u_ll: +return {Intrinsic::xtensa_ae_mula32u_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h0: +return {Intrinsic::xtensa_ae_mula32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h0_s2: +return {Intrinsic::xtensa_ae_mula32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h1: +return {Intrinsic::xtensa_ae_mula32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h1_s2: +return {Intrinsic::xtensa_ae_mula32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h2: +return {Intrinsic::xtensa_ae_mula32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h2_s2: +return {Intrinsic::xtensa_ae_mula32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h3: +return {Intrinsic::xtensa_ae_mula32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h3_s2: +return {Intrinsic::xtensa_ae_mula32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l0: +return {Intrinsic::xtensa_ae_mula32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l0_s2: +return {Intrinsic::xtensa_ae_mula32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l1: +return {Intrinsic::xtensa_ae_mula32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l1_s2: +return {Intrinsic::xtensa_ae_mula32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l2: +return {Intrinsic::xtensa_ae_mula32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l2_s2: +return {Intrinsic::xtensa_ae_mula32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l3: +return {Intrinsic::xtensa_ae_mula32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l3_s2: +return {Intrinsic::xtensa_ae_mula32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hh_ll: +return {Intrinsic::xtensa_ae_mulaad24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulaad24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hl_lh: +return {Intrinsic::xtensa_ae_mulaad24_hl_lh, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulaad24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulaad24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulaad32x16_h0_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h0_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulaad32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulaad32x16_h2_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h2_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulaad32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_11_00: +return {Intrinsic::xtensa_ae_mulaafd16ss_11_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_11_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_13_02: +return {Intrinsic::xtensa_ae_mulaafd16ss_13_02, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_13_02_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_33_22: +return {Intrinsic::xtensa_ae_mulaafd16ss_33_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_33_22_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulaafd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulaafd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hl_lh: +return {Intrinsic::xtensa_ae_mulaafd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulaafd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulaafd32x16_h0_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h0_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulaafd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulaafd32x16_h2_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h2_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac24: +return {Intrinsic::xtensa_ae_mulac24, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac32x16_h: +return {Intrinsic::xtensa_ae_mulac32x16_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac32x16_l: +return {Intrinsic::xtensa_ae_mulac32x16_l, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulaf16ss_00: +return {Intrinsic::xtensa_ae_mulaf16ss_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulaf16ss_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_10: +return {Intrinsic::xtensa_ae_mulaf16ss_10, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_11: +return {Intrinsic::xtensa_ae_mulaf16ss_11, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_20: +return {Intrinsic::xtensa_ae_mulaf16ss_20, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_21: +return {Intrinsic::xtensa_ae_mulaf16ss_21, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_22: +return {Intrinsic::xtensa_ae_mulaf16ss_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_30: +return {Intrinsic::xtensa_ae_mulaf16ss_30, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_31: +return {Intrinsic::xtensa_ae_mulaf16ss_31, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_32: +return {Intrinsic::xtensa_ae_mulaf16ss_32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_33: +return {Intrinsic::xtensa_ae_mulaf16ss_33, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16x4ss: +return {Intrinsic::xtensa_ae_mulaf16x4ss, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_hh: +return {Intrinsic::xtensa_ae_mulaf32r_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_lh: +return {Intrinsic::xtensa_ae_mulaf32r_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_ll: +return {Intrinsic::xtensa_ae_mulaf32r_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulaf32r_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_hh: +return {Intrinsic::xtensa_ae_mulaf32s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_lh: +return {Intrinsic::xtensa_ae_mulaf32s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_ll: +return {Intrinsic::xtensa_ae_mulaf32s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_ll_s2: +return {Intrinsic::xtensa_ae_mulaf32s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h0: +return {Intrinsic::xtensa_ae_mulaf32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h1: +return {Intrinsic::xtensa_ae_mulaf32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h2: +return {Intrinsic::xtensa_ae_mulaf32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h3: +return {Intrinsic::xtensa_ae_mulaf32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h3_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l0: +return {Intrinsic::xtensa_ae_mulaf32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l1: +return {Intrinsic::xtensa_ae_mulaf32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l2: +return 
{Intrinsic::xtensa_ae_mulaf32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l3: +return {Intrinsic::xtensa_ae_mulaf32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulaf48q32sp16s_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulaf48q32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulaf48q32sp16u_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulaf48q32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc24ra: +return {Intrinsic::xtensa_ae_mulafc24ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc32x16ras_h: +return {Intrinsic::xtensa_ae_mulafc32x16ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc32x16ras_l: +return {Intrinsic::xtensa_ae_mulafc32x16ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafd24x2_fir_h: +return {Intrinsic::xtensa_ae_mulafd24x2_fir_h, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd24x2_fir_l: +return {Intrinsic::xtensa_ae_mulafd24x2_fir_l, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_hh: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_hh, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_hl: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_hl, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_lh: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_lh, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_ll: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_ll, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2r: +return {Intrinsic::xtensa_ae_mulafp24x2r, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2r_s2: +return {Intrinsic::xtensa_ae_mulafp24x2r_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2ra: +return {Intrinsic::xtensa_ae_mulafp24x2ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulafp24x2ra_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_h: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x2ras: +return {Intrinsic::xtensa_ae_mulafp32x2ras, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x2rs: +return {Intrinsic::xtensa_ae_mulafp32x2rs, 0, 0x60100}; 
+case Xtensa::BI__builtin_xtensa_ae_mulafq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulafq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulafq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap24x2: +return {Intrinsic::xtensa_ae_mulap24x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap24x2_s2: +return {Intrinsic::xtensa_ae_mulap24x2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x16x2_h: +return {Intrinsic::xtensa_ae_mulap32x16x2_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x16x2_l: +return {Intrinsic::xtensa_ae_mulap32x16x2_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x2: +return {Intrinsic::xtensa_ae_mulap32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaq32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulaq32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulaq32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mularfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mularfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mularfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mularfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_hh: +return {Intrinsic::xtensa_ae_mulas32f48p16s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_hh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_lh: +return {Intrinsic::xtensa_ae_mulas32f48p16s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_ll: +return {Intrinsic::xtensa_ae_mulas32f48p16s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_ll_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hh_ll: +return {Intrinsic::xtensa_ae_mulasd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulasd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hl_lh: +return {Intrinsic::xtensa_ae_mulasd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulasd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulasd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulasd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulasd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulasd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulasfd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulasfd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulasfd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulasfd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulasfd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h1_l0_s2: +return 
{Intrinsic::xtensa_ae_mulasfd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulasfd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulasfd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulc24: +return {Intrinsic::xtensa_ae_mulc24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulc32x16_h: +return {Intrinsic::xtensa_ae_mulc32x16_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulc32x16_l: +return {Intrinsic::xtensa_ae_mulc32x16_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_00: +return {Intrinsic::xtensa_ae_mulf16ss_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulf16ss_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_10: +return {Intrinsic::xtensa_ae_mulf16ss_10, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_11: +return {Intrinsic::xtensa_ae_mulf16ss_11, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_20: +return {Intrinsic::xtensa_ae_mulf16ss_20, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_21: +return {Intrinsic::xtensa_ae_mulf16ss_21, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_22: +return {Intrinsic::xtensa_ae_mulf16ss_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_30: +return {Intrinsic::xtensa_ae_mulf16ss_30, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_31: +return {Intrinsic::xtensa_ae_mulf16ss_31, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_32: +return {Intrinsic::xtensa_ae_mulf16ss_32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_33: +return {Intrinsic::xtensa_ae_mulf16ss_33, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16x4ss: +return {Intrinsic::xtensa_ae_mulf16x4ss, 0, 0xc0003}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_hh: +return {Intrinsic::xtensa_ae_mulf32r_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_lh: +return {Intrinsic::xtensa_ae_mulf32r_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_ll: +return {Intrinsic::xtensa_ae_mulf32r_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulf32r_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_hh: +return {Intrinsic::xtensa_ae_mulf32s_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_lh: +return {Intrinsic::xtensa_ae_mulf32s_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_ll: +return {Intrinsic::xtensa_ae_mulf32s_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_ll_s2: +return {Intrinsic::xtensa_ae_mulf32s_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h0: +return {Intrinsic::xtensa_ae_mulf32x16_h0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h1: +return {Intrinsic::xtensa_ae_mulf32x16_h1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h2: +return {Intrinsic::xtensa_ae_mulf32x16_h2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h3: +return {Intrinsic::xtensa_ae_mulf32x16_h3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h3_s2: +return 
{Intrinsic::xtensa_ae_mulf32x16_h3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l0: +return {Intrinsic::xtensa_ae_mulf32x16_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l1: +return {Intrinsic::xtensa_ae_mulf32x16_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l2: +return {Intrinsic::xtensa_ae_mulf32x16_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l3: +return {Intrinsic::xtensa_ae_mulf32x16_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulf48q32sp16s_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulf48q32sp16s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulf48q32sp16u_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulf48q32sp16u_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc24ra: +return {Intrinsic::xtensa_ae_mulfc24ra, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc32x16ras_h: +return {Intrinsic::xtensa_ae_mulfc32x16ras_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc32x16ras_l: +return {Intrinsic::xtensa_ae_mulfc32x16ras_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfd24x2_fir_h: +return {Intrinsic::xtensa_ae_mulfd24x2_fir_h, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd24x2_fir_l: +return {Intrinsic::xtensa_ae_mulfd24x2_fir_l, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_hh: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_hh, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_hl: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_hl, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_lh: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_lh, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_ll: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_ll, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfp16x4ras: +return {Intrinsic::xtensa_ae_mulfp16x4ras, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp16x4s: +return {Intrinsic::xtensa_ae_mulfp16x4s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2r: +return {Intrinsic::xtensa_ae_mulfp24x2r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2r_s2: +return {Intrinsic::xtensa_ae_mulfp24x2r_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2ra: +return {Intrinsic::xtensa_ae_mulfp24x2ra, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulfp24x2ra_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_h: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_l_s2, 1, 
0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x2ras: +return {Intrinsic::xtensa_ae_mulfp32x2ras, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x2rs: +return {Intrinsic::xtensa_ae_mulfp32x2rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulfq32sp24s_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulfq32sp24s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp24x2: +return {Intrinsic::xtensa_ae_mulp24x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp24x2_s2: +return {Intrinsic::xtensa_ae_mulp24x2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp32x16x2_h: +return {Intrinsic::xtensa_ae_mulp32x16x2_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp32x16x2_l: +return {Intrinsic::xtensa_ae_mulp32x16x2_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp32x2: +return {Intrinsic::xtensa_ae_mulp32x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulq32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulq32sp16s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulq32sp16u_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulrfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulrfq32sp24s_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulrfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulrfq32sp24s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls16x4: +return {Intrinsic::xtensa_ae_muls16x4, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_muls32_hh: +return {Intrinsic::xtensa_ae_muls32_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32_lh: +return {Intrinsic::xtensa_ae_muls32_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32_ll: +return {Intrinsic::xtensa_ae_muls32_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_hh: +return {Intrinsic::xtensa_ae_muls32f48p16s_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_hh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_lh: +return {Intrinsic::xtensa_ae_muls32f48p16s_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_ll: +return {Intrinsic::xtensa_ae_muls32f48p16s_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_ll_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32u_ll: +return {Intrinsic::xtensa_ae_muls32u_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h0: +return {Intrinsic::xtensa_ae_muls32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h0_s2: +return {Intrinsic::xtensa_ae_muls32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h1: +return {Intrinsic::xtensa_ae_muls32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h1_s2: +return 
{Intrinsic::xtensa_ae_muls32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h2: +return {Intrinsic::xtensa_ae_muls32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h2_s2: +return {Intrinsic::xtensa_ae_muls32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h3: +return {Intrinsic::xtensa_ae_muls32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h3_s2: +return {Intrinsic::xtensa_ae_muls32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l0: +return {Intrinsic::xtensa_ae_muls32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l0_s2: +return {Intrinsic::xtensa_ae_muls32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l1: +return {Intrinsic::xtensa_ae_muls32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l1_s2: +return {Intrinsic::xtensa_ae_muls32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l2: +return {Intrinsic::xtensa_ae_muls32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l2_s2: +return {Intrinsic::xtensa_ae_muls32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l3: +return {Intrinsic::xtensa_ae_muls32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l3_s2: +return {Intrinsic::xtensa_ae_muls32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad24_hh_ll: +return {Intrinsic::xtensa_ae_mulsad24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulsad24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulsad32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulsad32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulsad32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulsad32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulsafd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulsafd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulsafd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_00: +return {Intrinsic::xtensa_ae_mulsf16ss_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulsf16ss_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_10: +return {Intrinsic::xtensa_ae_mulsf16ss_10, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_11: +return {Intrinsic::xtensa_ae_mulsf16ss_11, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_20: +return {Intrinsic::xtensa_ae_mulsf16ss_20, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_21: +return {Intrinsic::xtensa_ae_mulsf16ss_21, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_22: +return {Intrinsic::xtensa_ae_mulsf16ss_22, 0, 0x60100}; 
+case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_30: +return {Intrinsic::xtensa_ae_mulsf16ss_30, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_31: +return {Intrinsic::xtensa_ae_mulsf16ss_31, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_32: +return {Intrinsic::xtensa_ae_mulsf16ss_32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_33: +return {Intrinsic::xtensa_ae_mulsf16ss_33, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16x4ss: +return {Intrinsic::xtensa_ae_mulsf16x4ss, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_hh: +return {Intrinsic::xtensa_ae_mulsf32r_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_lh: +return {Intrinsic::xtensa_ae_mulsf32r_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_ll: +return {Intrinsic::xtensa_ae_mulsf32r_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulsf32r_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_hh: +return {Intrinsic::xtensa_ae_mulsf32s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_lh: +return {Intrinsic::xtensa_ae_mulsf32s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_ll: +return {Intrinsic::xtensa_ae_mulsf32s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h0: +return {Intrinsic::xtensa_ae_mulsf32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h1: +return {Intrinsic::xtensa_ae_mulsf32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h2: +return {Intrinsic::xtensa_ae_mulsf32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h3: +return {Intrinsic::xtensa_ae_mulsf32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h3_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l0: +return {Intrinsic::xtensa_ae_mulsf32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l1: +return {Intrinsic::xtensa_ae_mulsf32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l2: +return {Intrinsic::xtensa_ae_mulsf32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l3: +return {Intrinsic::xtensa_ae_mulsf32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulsf48q32sp16s_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulsf48q32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulsf48q32sp16u_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulsf48q32sp16u_l_s2, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulsfp24x2r: +return {Intrinsic::xtensa_ae_mulsfp24x2r, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2r_s2: +return {Intrinsic::xtensa_ae_mulsfp24x2r_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2ra: +return {Intrinsic::xtensa_ae_mulsfp24x2ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulsfp24x2ra_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_h: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x2ras: +return {Intrinsic::xtensa_ae_mulsfp32x2ras, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x2rs: +return {Intrinsic::xtensa_ae_mulsfp32x2rs, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulsfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulsfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp24x2: +return {Intrinsic::xtensa_ae_mulsp24x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp24x2_s2: +return {Intrinsic::xtensa_ae_mulsp24x2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x16x2_h: +return {Intrinsic::xtensa_ae_mulsp32x16x2_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x16x2_l: +return {Intrinsic::xtensa_ae_mulsp32x16x2_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x2: +return {Intrinsic::xtensa_ae_mulsp32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsq32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulsq32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulsq32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsrfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulsrfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsrfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulsrfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_hh: +return {Intrinsic::xtensa_ae_mulss32f48p16s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_mulss32f48p16s_hh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_lh: +return {Intrinsic::xtensa_ae_mulss32f48p16s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_mulss32f48p16s_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_ll: +return {Intrinsic::xtensa_ae_mulss32f48p16s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_ll_s2: +return 
{Intrinsic::xtensa_ae_mulss32f48p16s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hh_ll: +return {Intrinsic::xtensa_ae_mulssd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulssd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hl_lh: +return {Intrinsic::xtensa_ae_mulssd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulssd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulssd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulssd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulssd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulssd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_11_00: +return {Intrinsic::xtensa_ae_mulssfd16ss_11_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_11_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_13_02: +return {Intrinsic::xtensa_ae_mulssfd16ss_13_02, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_13_02_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_33_22: +return {Intrinsic::xtensa_ae_mulssfd16ss_33_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_33_22_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulssfd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulssfd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulssfd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulssfd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulssfd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hh_ll: +return {Intrinsic::xtensa_ae_mulzaad24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzaad24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hl_lh: +return {Intrinsic::xtensa_ae_mulzaad24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzaad24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulzaad32x16_h0_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h0_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzaad32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2: 
+return {Intrinsic::xtensa_ae_mulzaad32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulzaad32x16_h2_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h2_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_11_00: +return {Intrinsic::xtensa_ae_mulzaafd16ss_11_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_11_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_13_02: +return {Intrinsic::xtensa_ae_mulzaafd16ss_13_02, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_13_02_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_33_22: +return {Intrinsic::xtensa_ae_mulzaafd16ss_33_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_33_22_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzaafd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzaafd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzaafd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzaafd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h0_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h0_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h2_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h2_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzasd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzasd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzasd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzasd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzasd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzasd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h3_l2: +return 
{Intrinsic::xtensa_ae_mulzasd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzasd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzasfd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzasfd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzasfd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzasfd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad24_hh_ll: +return {Intrinsic::xtensa_ae_mulzsad24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzsad24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzsad32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzsafd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzsafd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzssd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzssd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzssd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzssd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzssd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h3_l2_s2, 1, 0x60001}; 
+case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_11_00: +return {Intrinsic::xtensa_ae_mulzssfd16ss_11_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulzssfd16ss_11_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_13_02: +return {Intrinsic::xtensa_ae_mulzssfd16ss_13_02, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulzssfd16ss_13_02_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_33_22: +return {Intrinsic::xtensa_ae_mulzssfd16ss_33_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulzssfd16ss_33_22_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzssfd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzssfd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzssfd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzssfd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_nand: +return {Intrinsic::xtensa_ae_nand, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_neg16s: +return {Intrinsic::xtensa_ae_neg16s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg24s: +return {Intrinsic::xtensa_ae_neg24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg32: +return {Intrinsic::xtensa_ae_neg32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg32s: +return {Intrinsic::xtensa_ae_neg32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg64: +return {Intrinsic::xtensa_ae_neg64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg64s: +return {Intrinsic::xtensa_ae_neg64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsa64: +return {Intrinsic::xtensa_ae_nsa64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsaz16_0: +return {Intrinsic::xtensa_ae_nsaz16_0, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsaz32_l: +return {Intrinsic::xtensa_ae_nsaz32_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_or: +return {Intrinsic::xtensa_ae_or, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_pksr24: +return {Intrinsic::xtensa_ae_pksr24, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_pksr32: +return {Intrinsic::xtensa_ae_pksr32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_round16x4f32sasym: +return {Intrinsic::xtensa_ae_round16x4f32sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round16x4f32ssym: +return {Intrinsic::xtensa_ae_round16x4f32ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round24x2f48sasym: +return {Intrinsic::xtensa_ae_round24x2f48sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round24x2f48ssym: +return {Intrinsic::xtensa_ae_round24x2f48ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f48sasym: +return {Intrinsic::xtensa_ae_round32x2f48sasym, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_round32x2f48ssym: +return {Intrinsic::xtensa_ae_round32x2f48ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f64sasym: +return {Intrinsic::xtensa_ae_round32x2f64sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f64ssym: +return {Intrinsic::xtensa_ae_round32x2f64ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16f24asym: +return {Intrinsic::xtensa_ae_roundsp16f24asym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16f24sym: +return {Intrinsic::xtensa_ae_roundsp16f24sym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16q48x2asym: +return {Intrinsic::xtensa_ae_roundsp16q48x2asym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16q48x2sym: +return {Intrinsic::xtensa_ae_roundsp16q48x2sym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsq32f48asym: +return {Intrinsic::xtensa_ae_roundsq32f48asym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsq32f48sym: +return {Intrinsic::xtensa_ae_roundsq32f48sym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_i: +return {Intrinsic::xtensa_ae_s16_0_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_ip: +return {Intrinsic::xtensa_ae_s16_0_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_x: +return {Intrinsic::xtensa_ae_s16_0_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_xc: +return {Intrinsic::xtensa_ae_s16_0_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_xp: +return {Intrinsic::xtensa_ae_s16_0_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_i: +return {Intrinsic::xtensa_ae_s16m_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_iu: +return {Intrinsic::xtensa_ae_s16m_l_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_x: +return {Intrinsic::xtensa_ae_s16m_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_xc: +return {Intrinsic::xtensa_ae_s16m_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_xu: +return {Intrinsic::xtensa_ae_s16m_l_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_i: +return {Intrinsic::xtensa_ae_s16x2m_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_iu: +return {Intrinsic::xtensa_ae_s16x2m_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_x: +return {Intrinsic::xtensa_ae_s16x2m_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_xc: +return {Intrinsic::xtensa_ae_s16x2m_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_xu: +return {Intrinsic::xtensa_ae_s16x2m_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_i: +return {Intrinsic::xtensa_ae_s16x4_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_ip: +return {Intrinsic::xtensa_ae_s16x4_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_ric: +return {Intrinsic::xtensa_ae_s16x4_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_rip: +return {Intrinsic::xtensa_ae_s16x4_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_x: +return {Intrinsic::xtensa_ae_s16x4_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_xc: +return {Intrinsic::xtensa_ae_s16x4_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_xp: +return {Intrinsic::xtensa_ae_s16x4_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_i: +return {Intrinsic::xtensa_ae_s24ra64s_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_ip: +return {Intrinsic::xtensa_ae_s24ra64s_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_x: +return {Intrinsic::xtensa_ae_s24ra64s_x, 0, 0x70000}; +case 
Xtensa::BI__builtin_xtensa_ae_s24ra64s_xc: +return {Intrinsic::xtensa_ae_s24ra64s_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_xp: +return {Intrinsic::xtensa_ae_s24ra64s_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24x2ra64s_ip: +return {Intrinsic::xtensa_ae_s24x2ra64s_ip, 0, 0x30400}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_i: +return {Intrinsic::xtensa_ae_s32_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_ip: +return {Intrinsic::xtensa_ae_s32_l_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_x: +return {Intrinsic::xtensa_ae_s32_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_xc: +return {Intrinsic::xtensa_ae_s32_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_xp: +return {Intrinsic::xtensa_ae_s32_l_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_i: +return {Intrinsic::xtensa_ae_s32f24_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_ip: +return {Intrinsic::xtensa_ae_s32f24_l_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_x: +return {Intrinsic::xtensa_ae_s32f24_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_xc: +return {Intrinsic::xtensa_ae_s32f24_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_xp: +return {Intrinsic::xtensa_ae_s32f24_l_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_i: +return {Intrinsic::xtensa_ae_s32m_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32m_iu: +return {Intrinsic::xtensa_ae_s32m_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_x: +return {Intrinsic::xtensa_ae_s32m_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32m_xc: +return {Intrinsic::xtensa_ae_s32m_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_xu: +return {Intrinsic::xtensa_ae_s32m_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_i: +return {Intrinsic::xtensa_ae_s32ra64s_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_ip: +return {Intrinsic::xtensa_ae_s32ra64s_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_x: +return {Intrinsic::xtensa_ae_s32ra64s_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_xc: +return {Intrinsic::xtensa_ae_s32ra64s_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_xp: +return {Intrinsic::xtensa_ae_s32ra64s_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_i: +return {Intrinsic::xtensa_ae_s32x2_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_ip: +return {Intrinsic::xtensa_ae_s32x2_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_ric: +return {Intrinsic::xtensa_ae_s32x2_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_rip: +return {Intrinsic::xtensa_ae_s32x2_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_x: +return {Intrinsic::xtensa_ae_s32x2_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_xc: +return {Intrinsic::xtensa_ae_s32x2_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_xp: +return {Intrinsic::xtensa_ae_s32x2_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_i: +return {Intrinsic::xtensa_ae_s32x2f24_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ip: +return {Intrinsic::xtensa_ae_s32x2f24_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ric: +return {Intrinsic::xtensa_ae_s32x2f24_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_rip: +return {Intrinsic::xtensa_ae_s32x2f24_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_x: +return {Intrinsic::xtensa_ae_s32x2f24_x, 0, 0x70000}; +case 
Xtensa::BI__builtin_xtensa_ae_s32x2f24_xc: +return {Intrinsic::xtensa_ae_s32x2f24_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_xp: +return {Intrinsic::xtensa_ae_s32x2f24_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2ra64s_ip: +return {Intrinsic::xtensa_ae_s32x2ra64s_ip, 0, 0x30400}; +case Xtensa::BI__builtin_xtensa_ae_s64_i: +return {Intrinsic::xtensa_ae_s64_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s64_ip: +return {Intrinsic::xtensa_ae_s64_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s64_x: +return {Intrinsic::xtensa_ae_s64_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s64_xc: +return {Intrinsic::xtensa_ae_s64_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s64_xp: +return {Intrinsic::xtensa_ae_s64_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ic: +return {Intrinsic::xtensa_ae_sa16x4_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ip: +return {Intrinsic::xtensa_ae_sa16x4_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ric: +return {Intrinsic::xtensa_ae_sa16x4_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_rip: +return {Intrinsic::xtensa_ae_sa16x4_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ic: +return {Intrinsic::xtensa_ae_sa24_l_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ip: +return {Intrinsic::xtensa_ae_sa24_l_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ric: +return {Intrinsic::xtensa_ae_sa24_l_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_rip: +return {Intrinsic::xtensa_ae_sa24_l_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ic: +return {Intrinsic::xtensa_ae_sa24x2_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ip: +return {Intrinsic::xtensa_ae_sa24x2_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ric: +return {Intrinsic::xtensa_ae_sa24x2_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_rip: +return {Intrinsic::xtensa_ae_sa24x2_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ic: +return {Intrinsic::xtensa_ae_sa32x2_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ip: +return {Intrinsic::xtensa_ae_sa32x2_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ric: +return {Intrinsic::xtensa_ae_sa32x2_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_rip: +return {Intrinsic::xtensa_ae_sa32x2_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ic: +return {Intrinsic::xtensa_ae_sa32x2f24_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ip: +return {Intrinsic::xtensa_ae_sa32x2f24_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ric: +return {Intrinsic::xtensa_ae_sa32x2f24_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_rip: +return {Intrinsic::xtensa_ae_sa32x2f24_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa64neg_fp: +return {Intrinsic::xtensa_ae_sa64neg_fp, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sa64pos_fp: +return {Intrinsic::xtensa_ae_sa64pos_fp, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_salign64_i: +return {Intrinsic::xtensa_ae_salign64_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_sat16x4: +return {Intrinsic::xtensa_ae_sat16x4, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sat24s: +return {Intrinsic::xtensa_ae_sat24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sat48s: +return {Intrinsic::xtensa_ae_sat48s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_satq56s: +return {Intrinsic::xtensa_ae_satq56s, 1, 0x20001}; +case 
Xtensa::BI__builtin_xtensa_ae_sb: +return {Intrinsic::xtensa_ae_sb, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sb_ic: +return {Intrinsic::xtensa_ae_sb_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sb_ip: +return {Intrinsic::xtensa_ae_sb_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sbf: +return {Intrinsic::xtensa_ae_sbf, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbf_ic: +return {Intrinsic::xtensa_ae_sbf_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbf_ip: +return {Intrinsic::xtensa_ae_sbf_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbi: +return {Intrinsic::xtensa_ae_sbi, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sbi_ic: +return {Intrinsic::xtensa_ae_sbi_ic, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sbi_ip: +return {Intrinsic::xtensa_ae_sbi_ip, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sel16i: +return {Intrinsic::xtensa_ae_sel16i, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_sel16i_n: +return {Intrinsic::xtensa_ae_sel16i_n, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_sext32: +return {Intrinsic::xtensa_ae_sext32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sext32x2d16_10: +return {Intrinsic::xtensa_ae_sext32x2d16_10, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sext32x2d16_32: +return {Intrinsic::xtensa_ae_sext32x2d16_32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sha32: +return {Intrinsic::xtensa_ae_sha32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_shortswap: +return {Intrinsic::xtensa_ae_shortswap, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slaa16s: +return {Intrinsic::xtensa_ae_slaa16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa32: +return {Intrinsic::xtensa_ae_slaa32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa32s: +return {Intrinsic::xtensa_ae_slaa32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa64: +return {Intrinsic::xtensa_ae_slaa64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa64s: +return {Intrinsic::xtensa_ae_slaa64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaaq56: +return {Intrinsic::xtensa_ae_slaaq56, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai16s: +return {Intrinsic::xtensa_ae_slai16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai24: +return {Intrinsic::xtensa_ae_slai24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai24s: +return {Intrinsic::xtensa_ae_slai24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai32: +return {Intrinsic::xtensa_ae_slai32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai32s: +return {Intrinsic::xtensa_ae_slai32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai64: +return {Intrinsic::xtensa_ae_slai64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai64s: +return {Intrinsic::xtensa_ae_slai64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaisq56s: +return {Intrinsic::xtensa_ae_slaisq56s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slas24: +return {Intrinsic::xtensa_ae_slas24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas24s: +return {Intrinsic::xtensa_ae_slas24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas32: +return {Intrinsic::xtensa_ae_slas32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas32s: +return {Intrinsic::xtensa_ae_slas32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas64: +return {Intrinsic::xtensa_ae_slas64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas64s: +return {Intrinsic::xtensa_ae_slas64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slasq56: +return {Intrinsic::xtensa_ae_slasq56, 1, 0x20001}; +case 
Xtensa::BI__builtin_xtensa_ae_slassq56s: +return {Intrinsic::xtensa_ae_slassq56s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sra64_32: +return {Intrinsic::xtensa_ae_sra64_32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa16rs: +return {Intrinsic::xtensa_ae_sraa16rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa16s: +return {Intrinsic::xtensa_ae_sraa16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32: +return {Intrinsic::xtensa_ae_sraa32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32rs: +return {Intrinsic::xtensa_ae_sraa32rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32s: +return {Intrinsic::xtensa_ae_sraa32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa64: +return {Intrinsic::xtensa_ae_sraa64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai16: +return {Intrinsic::xtensa_ae_srai16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai16r: +return {Intrinsic::xtensa_ae_srai16r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai24: +return {Intrinsic::xtensa_ae_srai24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai32: +return {Intrinsic::xtensa_ae_srai32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai32r: +return {Intrinsic::xtensa_ae_srai32r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai64: +return {Intrinsic::xtensa_ae_srai64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sras24: +return {Intrinsic::xtensa_ae_sras24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sras32: +return {Intrinsic::xtensa_ae_sras32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sras64: +return {Intrinsic::xtensa_ae_sras64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srla32: +return {Intrinsic::xtensa_ae_srla32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srla64: +return {Intrinsic::xtensa_ae_srla64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli24: +return {Intrinsic::xtensa_ae_srli24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli32: +return {Intrinsic::xtensa_ae_srli32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli64: +return {Intrinsic::xtensa_ae_srli64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srls24: +return {Intrinsic::xtensa_ae_srls24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srls32: +return {Intrinsic::xtensa_ae_srls32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srls64: +return {Intrinsic::xtensa_ae_srls64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sub16: +return {Intrinsic::xtensa_ae_sub16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub16s: +return {Intrinsic::xtensa_ae_sub16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub24s: +return {Intrinsic::xtensa_ae_sub24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub32: +return {Intrinsic::xtensa_ae_sub32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub32s: +return {Intrinsic::xtensa_ae_sub32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub64: +return {Intrinsic::xtensa_ae_sub64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub64s: +return {Intrinsic::xtensa_ae_sub64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_subadd32: +return {Intrinsic::xtensa_ae_subadd32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_subadd32s: +return {Intrinsic::xtensa_ae_subadd32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_trunca32f64s_l: +return {Intrinsic::xtensa_ae_trunca32f64s_l, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunca32x2f64s: +return {Intrinsic::xtensa_ae_trunca32x2f64s, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunci32f64s_l: +return 
{Intrinsic::xtensa_ae_trunci32f64s_l, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunci32x2f64s: +return {Intrinsic::xtensa_ae_trunci32x2f64s, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c: +return {Intrinsic::xtensa_ae_vldl16c, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c_ic: +return {Intrinsic::xtensa_ae_vldl16c_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c_ip: +return {Intrinsic::xtensa_ae_vldl16c_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16t: +return {Intrinsic::xtensa_ae_vldl16t, 0, 0x40003}; +case Xtensa::BI__builtin_xtensa_ae_vldl32t: +return {Intrinsic::xtensa_ae_vldl32t, 0, 0x40003}; +case Xtensa::BI__builtin_xtensa_ae_vldsht: +return {Intrinsic::xtensa_ae_vldsht, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_ae_vlel16t: +return {Intrinsic::xtensa_ae_vlel16t, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_vlel32t: +return {Intrinsic::xtensa_ae_vlel32t, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_vles16c: +return {Intrinsic::xtensa_ae_vles16c, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vles16c_ic: +return {Intrinsic::xtensa_ae_vles16c_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vles16c_ip: +return {Intrinsic::xtensa_ae_vles16c_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_xor: +return {Intrinsic::xtensa_ae_xor, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_zalign64: +return {Intrinsic::xtensa_ae_zalign64, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bithead: +return {Intrinsic::xtensa_rur_ae_bithead, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bitptr: +return {Intrinsic::xtensa_rur_ae_bitptr, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bitsused: +return {Intrinsic::xtensa_rur_ae_bitsused, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cbegin0: +return {Intrinsic::xtensa_rur_ae_cbegin0, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cend0: +return {Intrinsic::xtensa_rur_ae_cend0, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cw_sd_no: +return {Intrinsic::xtensa_rur_ae_cw_sd_no, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cwrap: +return {Intrinsic::xtensa_rur_ae_cwrap, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_first_ts: +return {Intrinsic::xtensa_rur_ae_first_ts, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_nextoffset: +return {Intrinsic::xtensa_rur_ae_nextoffset, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_overflow: +return {Intrinsic::xtensa_rur_ae_overflow, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_ovf_sar: +return {Intrinsic::xtensa_rur_ae_ovf_sar, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_sar: +return {Intrinsic::xtensa_rur_ae_sar, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_searchdone: +return {Intrinsic::xtensa_rur_ae_searchdone, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_tablesize: +return {Intrinsic::xtensa_rur_ae_tablesize, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_ts_fts_bu_bp: +return {Intrinsic::xtensa_rur_ae_ts_fts_bu_bp, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_wur_ae_bithead: +return {Intrinsic::xtensa_wur_ae_bithead, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_bitptr: +return {Intrinsic::xtensa_wur_ae_bitptr, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_bitsused: +return {Intrinsic::xtensa_wur_ae_bitsused, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cbegin0: +return {Intrinsic::xtensa_wur_ae_cbegin0, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cend0: +return {Intrinsic::xtensa_wur_ae_cend0, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cw_sd_no: +return 
{Intrinsic::xtensa_wur_ae_cw_sd_no, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cwrap: +return {Intrinsic::xtensa_wur_ae_cwrap, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_first_ts: +return {Intrinsic::xtensa_wur_ae_first_ts, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_nextoffset: +return {Intrinsic::xtensa_wur_ae_nextoffset, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_overflow: +return {Intrinsic::xtensa_wur_ae_overflow, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_ovf_sar: +return {Intrinsic::xtensa_wur_ae_ovf_sar, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_sar: +return {Intrinsic::xtensa_wur_ae_sar, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_searchdone: +return {Intrinsic::xtensa_wur_ae_searchdone, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_tablesize: +return {Intrinsic::xtensa_wur_ae_tablesize, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_ts_fts_bu_bp: +return {Intrinsic::xtensa_wur_ae_ts_fts_bu_bp, 0, 0x10000}; diff --git a/clang/include/clang/Basic/XtensaSemaCheck.inc b/clang/include/clang/Basic/XtensaSemaCheck.inc new file mode 100644 index 0000000000000..58c267e103d46 --- /dev/null +++ b/clang/include/clang/Basic/XtensaSemaCheck.inc @@ -0,0 +1,215 @@ +//===-- XtensaSemaCheck.inc - Clang semantic checks for Xtensa arch ----*- C++ +//-*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +case Xtensa::BI__builtin_xtensa_ae_dbi: +return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_dbi_ic: +return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_dbi_ip: +return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_l16_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 2); +case Xtensa::BI__builtin_xtensa_ae_l16_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_l16m_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 2); +case Xtensa::BI__builtin_xtensa_ae_l16m_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_l16x2m_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l16x2m_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l16x4_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l16x4_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l32_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32f24_i: +return 
SemaBuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32f24_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32m_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32m_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32x2_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l64_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l64_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_lalign64_i: +return SemaBuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_lbi: +return SemaBuiltinConstantArgRange(TheCall, 0, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_lbki: +return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_lbsi: +return SemaBuiltinConstantArgRange(TheCall, 0, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_movi: +return SemaBuiltinConstantArgRange(TheCall, 0, -16, 47); +case Xtensa::BI__builtin_xtensa_ae_pksr24: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_pksr32: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_s16_0_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16_0_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16m_l_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16m_l_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16x2m_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s16x2m_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s16x4_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s16x4_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_i: +return 
SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32_l_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32_l_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32m_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32m_iu: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32x2_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s64_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s64_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_salign64_i: +return SemaBuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_sbi: +return SemaBuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sbi_ic: +return SemaBuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sbi_ip: +return SemaBuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sel16i: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_sel16i_n: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_sext32: +return SemaBuiltinConstantArgRange(TheCall, 1, 7, 22); +case Xtensa::BI__builtin_xtensa_ae_slai16s: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_slai24: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai24s: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai32: +return 
SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai32s: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai64: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_slai64s: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_slaisq56s: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_srai16: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_srai16r: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_srai24: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai32: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai32r: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai64: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_srli24: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srli32: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srli64: +return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_trunci32f64s_l: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_trunci32x2f64s: +return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 5845ac1194666..7e591bd04be37 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -13641,7 +13641,8 @@ class Sema final { bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckXtensaBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); - + bool SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCall); + bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 3bc8cc531069d..7b5346062bccb 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -24,6 +24,8 @@ static constexpr Builtin::Info BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES}, #include "clang/Basic/BuiltinsXtensa.def" +#include "clang/Basic/BuiltinsXtensaHIFI.def" +#undef BUILTIN }; ArrayRef XtensaTargetInfo::getTargetBuiltins() const { @@ -75,6 +77,8 @@ bool XtensaTargetInfo::hasFeature(StringRef Feature) const { return llvm::StringSwitch(Feature) .Case("fp", HasFP) .Case("windowed", HasWindowed) + .Case("bool", HasBoolean) + .Case("hifi3", HasHIFI3) .Default(false); } @@ -84,8 +88,12 @@ bool XtensaTargetInfo::handleTargetFeatures(std::vector &Features, for (const auto &Feature : Features) { if (Feature == "+fp") HasFP = true; + else if (Feature == "+bool") + HasBoolean = true; else if (Feature == "+windowed") HasWindowed = true; + else if (Feature == "+hifi3") + HasHIFI3 = true; } return true; diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 1d777f9014d12..b2c923b2cd24a 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -33,6 
+33,8 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo {
   std::string CPU;
   bool HasFP = false;
   bool HasWindowed = false;
+  bool HasBoolean = false;
+  bool HasHIFI3 = false;
 
 public:
   XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -49,7 +51,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo {
     WIntType = UnsignedInt;
     UseZeroLengthBitfieldAlignment = true;
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
-    resetDataLayout("e-m:e-p:32:32-i64:64-i128:128-n32");
+    resetDataLayout("e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32");
   }
 
   void getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 30f5f4e7061c0..9c43677c51e13 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -52,6 +52,7 @@
 #include "llvm/IR/IntrinsicsVE.h"
 #include "llvm/IR/IntrinsicsWebAssembly.h"
 #include "llvm/IR/IntrinsicsX86.h"
+#include "llvm/IR/IntrinsicsXtensa.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/MatrixBuilder.h"
 #include "llvm/Support/ConvertUTF.h"
@@ -5591,6 +5592,8 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
   case llvm::Triple::loongarch32:
   case llvm::Triple::loongarch64:
     return CGF->EmitLoongArchBuiltinExpr(BuiltinID, E);
+  case llvm::Triple::xtensa:
+    return CGF->EmitXtensaBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
   default:
     return nullptr;
   }
@@ -20544,3 +20547,244 @@ Value *CodeGenFunction::EmitLoongArchBuiltinExpr(unsigned BuiltinID,
   llvm::Function *F = CGM.getIntrinsic(ID);
   return Builder.CreateCall(F, Ops);
 }
+
+
+struct XtensaIntrinsicInfo {
+  unsigned IntrinsicID;
+  unsigned Kind;
+  unsigned Arg;
+};
+
+static XtensaIntrinsicInfo GetXtensaIntrinsic(unsigned BuiltinID) {
+  switch (BuiltinID) {
+  case Xtensa::BI__builtin_xtensa_xt_lsip:
+    return {Intrinsic::xtensa_xt_lsip, 2, 0x20100};
+  case Xtensa::BI__builtin_xtensa_xt_lsxp:
+    return {Intrinsic::xtensa_xt_lsxp, 2, 0x20100};
+#include "clang/Basic/XtensaBuiltins.inc"
+  default:
+    llvm_unreachable("unexpected builtin ID");
+  }
+}
+
+llvm::Value *CodeGenFunction::ConvertXtensaToC(Value *val,
+                                               llvm::Type *destType) {
+  Value *argCast;
+  llvm::Type *valType = val->getType();
+
+  if (valType != destType) { // i32 to C short or char
+    argCast = Builder.CreateTruncOrBitCast(val, destType, "cast");
+    return argCast;
+  } else {
+    return val;
+  }
+}
+
+llvm::Value *CodeGenFunction::ConvertXtensaToBc(const Expr *ArgExpr,
+                                                llvm::Type *destType) {
+
+  Value *ArgVal = EmitScalarExpr(ArgExpr);
+  Value *ArgCast = ArgVal;
+  llvm::Type *ArgType = ArgVal->getType();
+  bool sign = ArgExpr->getType()->isSignedIntegerType();
+
+  if (ArgType != destType) { // short,char
+    if (sign)
+      ArgCast = Builder.CreateSExtOrBitCast(ArgVal, destType, "cast");
+    else
+      ArgCast = Builder.CreateZExtOrBitCast(ArgVal, destType, "cast");
+  }
+  return ArgCast;
+}
+llvm::Value *
+CodeGenFunction::EmitXtensaConversionExpr(unsigned BuiltinID, const CallExpr *E,
+                                          ReturnValueSlot ReturnValue,
+                                          llvm::Triple::ArchType Arch) {
+  unsigned MaxElems;
+  switch (BuiltinID) {
+  case Xtensa::BI__builtin_xtensa_ae_int32x2:
+    MaxElems = 2;
+    break;
+  case Xtensa::BI__builtin_xtensa_ae_int32:
+    MaxElems = 1;
+    break;
+  default:
+    llvm_unreachable("Unknown intrinsic ID");
+  }
+
+  Value *ArgVal = EmitScalarExpr(E->getArg(0));
+  QualType QT = E->getArg(0)->getType();
+  if (auto *VecTy = QT->getAs()) {
+    unsigned NumEl = VecTy->getNumElements();
+    llvm::Type *ElType = ConvertType(VecTy->getElementType());
+    if (ElType !=
+        Int32Ty || NumEl > MaxElems) {
+      CGM.Error(E->getExprLoc(), "Expected int32x1 or int32x2");
+      return ArgVal;
+    }
+    if (NumEl == MaxElems)
+      return ArgVal; // no-op
+    int Mask[] = {0,0};
+    Value *Result =
+        Builder.CreateShuffleVector(ArgVal, ArgVal, ArrayRef(Mask, MaxElems));
+    return Result;
+  } else if (QT->isIntegerType()) {
+    Value *Int32Val = (QT->isSignedIntegerType())
+                          ? Builder.CreateSExtOrTrunc(ArgVal, Int32Ty, "cast")
+                          : Builder.CreateZExtOrTrunc(ArgVal, Int32Ty, "cast");
+    Value *VecOps[] = {Int32Val,Int32Val};
+    Value *Result = BuildVector(ArrayRef(VecOps, MaxElems));
+    return Result;
+  }
+  llvm_unreachable("Invalid Argument type");
+}
+
+llvm::Value *
+CodeGenFunction::EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                       ReturnValueSlot ReturnValue,
+                                       llvm::Triple::ArchType Arch) {
+
+  switch (BuiltinID) {
+  case Xtensa::BI__builtin_xtensa_ae_int32x2:
+  case Xtensa::BI__builtin_xtensa_ae_int32:
+    return EmitXtensaConversionExpr(BuiltinID, E, ReturnValue, Arch);
+  default:
+    break;
+  };
+
+  XtensaIntrinsicInfo Info = GetXtensaIntrinsic(BuiltinID);
+  unsigned Intrinsic = Info.IntrinsicID;
+
+  llvm::Function *F = CGM.getIntrinsic(Intrinsic);
+
+  switch (Info.Kind) {
+  case 0: {
+    // void case
+    //
+    // void builtin(t1 *out /*out*/,..,t2 *inout, ..., t3 in, ..,) =>
+    // load t2 inout, ...
+    // {t1 out1, ..., t2 inout, ... ,} = func(t2 inout, ..., t3 in, ...)
+    // store (extractvalue 0) t1, ..
+
+    SmallVector Out;
+    SmallVector Inout;
+    SmallVector In;
+    SmallVector OutAddr;
+
+    unsigned Code = Info.Arg;
+    unsigned CodeOut = Code & 0xff;
+    unsigned CodeInout = (Code >> 8) & 0xff;
+    unsigned CodeIn = (Code >> 16) & 0xff;
+
+    for (unsigned i = 0; i < 8; ++i) {
+      if (CodeOut & (1 << i))
+        Out.push_back(i);
+      if (CodeInout & (1 << i))
+        Inout.push_back(i);
+      if (CodeIn & (1 << i))
+        In.push_back(i);
+    }
+
+    size_t asize = Inout.size() + In.size();
+    SmallVector Args(asize, nullptr);
+    assert(Args.size() == asize);
+
+    for (uint8_t idx : In) {
+      unsigned funArg = idx - Out.size();
+      llvm::Type *destType = F->getArg(funArg)->getType();
+      Args[funArg] = ConvertXtensaToBc(E->getArg(idx), destType);
+    }
+
+    for (unsigned i = 0; i < Out.size(); ++i) {
+      unsigned idx = Out[i];
+      Address AIn = EmitPointerWithAlignment(E->getArg(idx));
+      Address AOut = AIn;
+      OutAddr.push_back(AOut);
+    }
+
+    for (uint8_t idx : Inout) {
+      uint8_t FIdx = idx - Out.size();
+      Address AIn = EmitPointerWithAlignment(E->getArg(idx));
+      Address AOut = AIn;
+      OutAddr.push_back(AOut);
+      Value *Ptr = Builder.CreateLoad(AOut);
+      Args[FIdx] = Ptr;
+    }
+
+    for (auto a : Args)
+      assert(a != nullptr);
+
+    Value *Val = Builder.CreateCall(F, Args);
+    Value *Val0 = nullptr;
+    // check if out is a struct
+    if ((OutAddr.size() > 1)) {
+      for (unsigned i = 0; i < OutAddr.size(); ++i) {
+        Value *Out = Builder.CreateExtractValue(Val, i);
+        if (!Val0) // return the first value
+          Val0 = Out;
+        Address Addr = OutAddr[i];
+        llvm::Type *DestType = Addr.getElementType();
+        Value *OutConv = ConvertXtensaToC(Out, DestType);
+        Builder.CreateStore(OutConv, Addr);
+      }
+    } else if (OutAddr.size() == 1) {
+      Builder.CreateStore(Val, OutAddr[0]);
+      Val0 = Val;
+    }
+    assert(Val0);
+    return Val0;
+  }
+  case 1: {
+    // t_out builtin(t1 in1, t2 in2, ...) =>
+    // t_out out1 = BcToXt( func(XtToBc(t1), XtToBc(t2), ...) )
+    unsigned Code = Info.Arg;
+    uint8_t CodeOut = Code & 0xff;
+    uint8_t CodeInout = (Code >> 8) & 0xff;
+    uint8_t CodeIn = (Code >> 16) & 0xff;
+
+    SmallVector In;
+
+    assert(CodeOut == 1 && CodeInout == 0 && "Invalid signature");
+    for (unsigned i = 0; i < 8; ++i) {
+      if (CodeIn & (1 << i))
+        In.push_back(i);
+    }
+    SmallVector Args(In.size(), nullptr);
+    for (uint8_t idx : In) {
+      uint8_t aIdx = idx - 1;
+      llvm::Type *destType = F->getArg(aIdx)->getType();
+      Args[aIdx] = ConvertXtensaToBc(E->getArg(aIdx), destType);
+    }
+    Value *Val = Builder.CreateCall(F, Args, "retval");
+    llvm::Type *ResultType = ConvertType(E->getType());
+    Value *ValConv = ConvertXtensaToC(Val, ResultType);
+    return ValConv;
+  }
+  case 2: {
+    // 1st argument is passed by pointer
+    /* float lsip(float **a, int off) => float p = *a
+       ret, p' = @int.xtensa.lsip(p, off)
+       *a = p'
+    */
+    auto InoutPtrTy = F->getArg(0)->getType()->getPointerTo();
+    Address InoutPtrAddr = EmitPointerWithAlignment(E->getArg(0))
+                               .withElementType(InoutPtrTy);
+
+    unsigned NumArgs = E->getNumArgs();
+    Value *InoutVal = Builder.CreateLoad(InoutPtrAddr);
+    SmallVector Args;
+
+    Args.push_back(InoutVal);
+    for (unsigned i = 1; i < NumArgs; i++)
+      Args.push_back(EmitScalarExpr(E->getArg(i)));
+
+    Value *Val = Builder.CreateCall(F, Args, "retval");
+    Value *Val0 = Builder.CreateExtractValue(Val, 0);
+    Value *Val1 = Builder.CreateExtractValue(Val, 1);
+    // ret store
+    Builder.CreateStore(Val1, InoutPtrAddr);
+    return Val0;
+  }
+  default:
+    llvm_unreachable("unknown intrinsic kind");
+  }
+}
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 8722fd4550e4a..b4eccca8da80b 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4419,6 +4419,14 @@ class CodeGenFunction : public CodeGenTypeCache {
   /// Emits a reference binding to the passed in expression.
RValue EmitReferenceBindingToExpr(const Expr *E); + llvm::Value *ConvertXtensaToBc(const Expr *Arg, llvm::Type *destType); + llvm::Value *ConvertXtensaToC(llvm::Value *arg, llvm::Type *destType); + llvm::Value *EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + llvm::Value *EmitXtensaConversionExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index f3e0170dda6d2..b615ce7913526 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -11,6 +11,7 @@ using namespace clang; using namespace clang::CodeGen; + //===----------------------------------------------------------------------===// // Xtensa ABI Implementation //===----------------------------------------------------------------------===// @@ -98,6 +99,26 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } + // xtbool + if (getTarget().hasFeature("bool") && Size <= 8 && Ty->isVectorType()) { + // The type size is rounded up to the power of two and at least 8 bits, + // so we need to get the "true" size from num of vector elements + const VectorType *VT = Ty->getAs(); + unsigned NumBits = VT->getNumElements(); + llvm::Type *ResType = + llvm::FixedVectorType::get(llvm::Type::getInt1Ty(getVMContext()), NumBits); + return ABIArgInfo::getDirect(ResType); + } + // Vector arguments + if (getTarget().hasFeature("hifi3") && Ty->isVectorType() && (Size <= 64)) { + const VectorType *VT = Ty->getAs(); + QualType EltTy = VT->getElementType(); + unsigned EltSize = getContext().getTypeSize(EltTy); + if (EltSize == 8) // VAlign + return ABIArgInfo::getDirect( + llvm::IntegerType::get(getVMContext(), Size)); + return ABIArgInfo::getDirectInReg(); + } // Aggregates which are <= 6*32 will be passed in registers if possible, // so coerce to integers. 
if ((Size <= (MaxNumArgGPRs * 32)) && (!MustUseStack)) { @@ -239,7 +260,6 @@ class XtensaTargetCodeGenInfo : public TargetCodeGenInfo { }; } // namespace - std::unique_ptr CodeGen::createXtensaTargetCodeGenInfo(CodeGenModule &CGM) { return std::make_unique(CGM.getTypes()); diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 36f3e4a152696..1d8becf90a979 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -264,6 +264,9 @@ void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, if (!A->getOption().matches(options::OPT_g0)) CmdArgs.push_back("-g"); + if (Args.getLastArg(options::OPT_mtext_section_literals)) + CmdArgs.push_back("--text-section-literals"); + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, false)) CmdArgs.push_back("-fverbose-asm"); diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index f2b0c5cddcbbf..f09a65fbee587 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -234,6 +234,11 @@ set(x86_files cpuid.h ) +set(xtensa_files + xtensa_defs.h + xtensa_protos.h +) + set(windows_only_files intrin.h vadefs.h @@ -261,6 +266,7 @@ set(files ${systemz_files} ${ve_files} ${x86_files} + ${xtensa_files} ${webassembly_files} ${windows_only_files} ${utility_files} @@ -463,6 +469,7 @@ add_header_target("systemz-resource-headers" "${systemz_files}") add_header_target("ve-resource-headers" "${ve_files}") add_header_target("webassembly-resource-headers" "${webassembly_files}") add_header_target("x86-resource-headers" "${x86_files}") +add_header_target("xtensa-resource-headers" "${xtensa_files}") # Other header groupings add_header_target("hlsl-resource-headers" ${hlsl_files}) @@ -633,7 +640,13 @@ if(NOT CLANG_ENABLE_HLSL) endif() install( - FILES ${hlsl_h} + FILES ${xtensa_files} + DESTINATION ${header_install_dir} + EXCLUDE_FROM_ALL + COMPONENT xtensa-resource-headers) + +install( + FILES ${hlsl_files} DESTINATION ${header_install_dir} ${EXCLUDE_HLSL} COMPONENT hlsl-resource-headers) diff --git a/clang/lib/Headers/xtensa_defs.h b/clang/lib/Headers/xtensa_defs.h new file mode 100644 index 0000000000000..d47cacf71803b --- /dev/null +++ b/clang/lib/Headers/xtensa_defs.h @@ -0,0 +1,66 @@ +/*===---- xtensa_defs.h - Xtensa definitions -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __XTENSAHIFI3_H +#define __XTENSAHIFI3_H + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__((vector_size(4))); +typedef int ae_int32x2 __attribute__((vector_size(8))); +typedef short ae_int16 __attribute__((vector_size(2))); +typedef short ae_int16x2 __attribute__((vector_size(4))); +typedef short ae_int16x4 __attribute__((vector_size(8))); +typedef long long ae_int64 __attribute__((vector_size(8))); +typedef unsigned char ae_valign __attribute__((vector_size(8))); +typedef ae_int16x4 ae_f16x4; +typedef ae_int32x2 ae_f32x2; +typedef ae_int32 ae_f32; +typedef ae_int64 ae_f64; +typedef ae_int32 ae_f24; +typedef ae_int32x2 ae_f24x2; +typedef ae_int16 ae_f16; + +#include + +#define AE_SETCBEGIN0(x) WUR_AE_CBEGIN0(x) +#define AE_SETCEND0(x) WUR_AE_CEND0(x) +#define AE_ZERO64(x) AE_MOVI(0) +#define AE_ZERO24(x) AE_MOVI(0) +#define AE_ZERO32(x) AE_MOVI(0) +#define AE_ZERO16(x) AE_MOVI(0) +#define AE_ZEROQ56(x) AE_ZERO64(x) + +#define AE_SEL32_L(a) \ + ({ \ + ae_int32x2 _a = a; \ + __builtin_shufflevector(_a, _a, 0); \ + }) + +#define AE_INT32(a) __builtin_xtensa_ae_int32(a); +#define AE_INT32X2(a) __builtin_xtensa_ae_int32x2(a); + +#define AE_F32X2 AE_INT32X2 +#define AE_F32 AE_INT32 + +#define AE_MOVINT16X4_FROMINT32X2(a) ((ae_int32x2)(a)) + +#define AE_F32_ADDS_F32(s1, s2) \ + AE_F32(AE_ADD32S(AE_INT32X2(s1), AE_INT32X2(s2))) + +typedef float xtfloat; + +#define XT_xtfloat_storeip(x, a, i) ({ a = __builtin_xtensa_xt_ssip(x, a, i); }) +#define XT_xtfloat_loadip(x, a, i) \ + ({ x = __builtin_xtensa_xt_lsip((xtfloat **)&a, i); }) +#define XT_xtfloat_loadxp(x, a, i) \ + ({ x = __builtin_xtensa_xt_lsxp((xtfloat **)&a, i); }) + +#endif /* __XTENSAHIFI3_H */ \ No newline at end of file diff --git a/clang/lib/Headers/xtensa_protos.h b/clang/lib/Headers/xtensa_protos.h new file mode 100644 index 0000000000000..0cc75cc8431ab --- /dev/null +++ b/clang/lib/Headers/xtensa_protos.h @@ -0,0 +1,6894 @@ + +/*===---- xtensa_protos.h - Xtensa intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __XTENSAHIFI3INTRIN_H +#define __XTENSAHIFI3INTRIN_H + +#define AE_ABS16S(ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs16s((__ae_arth_v1)); \ + }) + +#define AE_ABS24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs24s((__ae_arth_v1)); \ + }) + +#define AE_ABS32(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs32((__ae_arth_v1)); \ + }) + +#define AE_ABS32S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs32s((__ae_arth_v1)); \ + }) + +#define AE_ABS64(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs64((__ae_arth_v1)); \ + }) + +#define AE_ABS64S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs64s((__ae_arth_v1)); \ + }) + +#define AE_ADD16(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add16((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD16S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add16s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD24S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add24s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32_HL_LH(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32_hl_lh((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD64(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add64((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD64S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add64s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADDBRBA32(art, ars) \ + ({ \ + int __art = (int)(art); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_addbrba32((__art), (__ars)); \ + }) + +#define AE_ADDSUB32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_addsub32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADDSUB32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_addsub32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_AND(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_and((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define 
AE_CVT32X2F16_10(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_cvt32x2f16_10((__ae_to_dr_v0)); \ + }) + +#define AE_CVT32X2F16_32(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_cvt32x2f16_32((__ae_to_dr_v0)); \ + }) + +#define AE_CVT48A32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvt48a32((__ars)); \ + }) + +#define AE_CVT64A32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvt64a32((__ars)); \ + }) + +#define AE_CVT64F32_H(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvt64f32_h((__ae_dr_to_dr_v0)); \ + }) + +#define AE_CVTA32F24S_H(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_cvta32f24s_h((__ae_dr_to_ar_v0)); \ + }) + +#define AE_CVTA32F24S_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_cvta32f24s_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_CVTQ56A32S(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvtq56a32s((__ars)); \ + }) + +#define AE_CVTQ56P32S_H(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvtq56p32s_h((__ae_dr_to_dr_v0)); \ + }) + +#define AE_CVTQ56P32S_L(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvtq56p32s_l((__ae_dr_to_dr_v0)); \ + }) + +#define AE_DB(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db((const short **)&(ars), (__art)); \ + }) + +#define AE_DB_IC(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db_ic((const short **)&(ars), (__art)); \ + }) + +#define AE_DB_IP(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db_ip((const short **)&(ars), (__art)); \ + }) + +#define AE_DBI(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi((const short **)&(ars), (ae_ohba)); }) + +#define AE_DBI_IC(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi_ic((const short **)&(ars), (ae_ohba)); }) + +#define AE_DBI_IP(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi_ip((const short **)&(ars), (ae_ohba)); }) + +#define AE_DIV64D32_H(ae_arth_v, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_div64d32_h((ae_int64 *)&(ae_arth_v), (__ae_arth_v1)); \ + }) + +#define AE_DIV64D32_L(ae_arth_v, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_div64d32_l((ae_int64 *)&(ae_arth_v), (__ae_arth_v1)); \ + }) + +#define AE_EQ16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_EQ32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_EQ64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_L16_I(ars, ae_immls16) \ + ({ \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_l16_i((__ars), (ae_immls16)); \ + }) + +#define AE_L16_IP(ae_ls_v, ars, ae_immls16) \ + ({ \ + __builtin_xtensa_ae_l16_ip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (ae_immls16)); \ + }) + +#define AE_L16_X(ars, art) \ + ({ \ + ae_int16 *__ars = (ars); \ + int 
__art = (int)(art); \ + __builtin_xtensa_ae_l16_x((__ars), (__art)); \ + }) + +#define AE_L16_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16_xc((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16_xp((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16M_I(ars, ae_immls16) \ + ({ \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_l16m_i((__ars), (ae_immls16)); \ + }) + +#define AE_L16M_IU(ae_ls_v, ars, ae_immls16) \ + ({ \ + __builtin_xtensa_ae_l16m_iu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (ae_immls16)); \ + }) + +#define AE_L16M_X(ars, art) \ + ({ \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16m_x((__ars), (__art)); \ + }) + +#define AE_L16M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16m_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16m_xu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16X2M_I(ars, ae_immls32) \ + ({ \ + ae_int16x2 *__ars = (ars); \ + __builtin_xtensa_ae_l16x2m_i((__ars), (ae_immls32)); \ + }) + +#define AE_L16X2M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l16x2m_iu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L16X2M_X(ars, art) \ + ({ \ + ae_int16x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_x((__ars), (__art)); \ + }) + +#define AE_L16X2M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (__art)); \ + }) + +#define AE_L16X2M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_xu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (__art)); \ + }) + +#define AE_L16X4_I(ars, ae_immls64) \ + ({ \ + ae_int16x4 *__ars = (ars); \ + __builtin_xtensa_ae_l16x4_i((__ars), (ae_immls64)); \ + }) + +#define AE_L16X4_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l16x4_ip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L16X4_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l16x4_ric((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_L16X4_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l16x4_rip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_L16X4_X(ars, art) \ + ({ \ + ae_int16x4 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x4_x((__ars), (__art)); \ + }) + +#define AE_L16X4_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x4_xc((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_L16X4_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x4_xp((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_L32_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define 
AE_L32_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_x((__ars), (__art)); \ + }) + +#define AE_L32_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32F24_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32f24_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32F24_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32f24_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L32F24_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32f24_x((__ars), (__art)); \ + }) + +#define AE_L32F24_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32f24_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32F24_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32f24_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32M_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32m_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32m_iu((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L32M_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_x((__ars), (__art)); \ + }) + +#define AE_L32M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_xc((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_xu((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32X2_I(ars, ae_immls64) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_l32x2_i((__ars), (ae_immls64)); \ + }) + +#define AE_L32X2_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l32x2_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L32X2_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2_ric((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2_rip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2_X(ars, art) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_x((__ars), (__art)); \ + }) + +#define AE_L32X2_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2F24_I(ars, ae_immls64) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_l32x2f24_i((__ars), (ae_immls64)); \ + }) + +#define AE_L32X2F24_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_ip((ae_int32x2 
*)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L32X2F24_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_ric((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2F24_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_rip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2F24_X(ars, art) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_x((__ars), (__art)); \ + }) + +#define AE_L32X2F24_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2F24_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L64_I(ars, ae_immls64) \ + ({ \ + ae_int64 *__ars = (ars); \ + __builtin_xtensa_ae_l64_i((__ars), (ae_immls64)); \ + }) + +#define AE_L64_IP(ae_ls_v, ars, ae_immls64) \ + ({ \ + __builtin_xtensa_ae_l64_ip((ae_int64 *)&(ae_ls_v), \ + (const ae_int64 **)&(ars), (ae_immls64)); \ + }) + +#define AE_L64_X(ars, art) \ + ({ \ + ae_int64 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_x((__ars), (__art)); \ + }) + +#define AE_L64_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_xc((ae_int64 *)&(ae_ls_v), \ + (const ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_L64_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_xp((ae_int64 *)&(ae_ls_v), \ + (const ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_LA16X4_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ic((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ip((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ric((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_rip((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4neg_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4pos_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA24_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24NEG_PC(ae_ls_uu, ars) \ + ({ \ 
+ __builtin_xtensa_ae_la24neg_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24pos_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2neg_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2pos_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA32X2_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2neg_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2pos_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA64_PP(ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_la64_pp((__ars)); \ + }) + +#define AE_LALIGN64_I(ars, ae_immls64) \ + ({ \ + ae_valign *__ars = (ars); \ + __builtin_xtensa_ae_lalign64_i((__ars), (ae_immls64)); \ + }) + +#define AE_LB(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lb((__art)); \ + }) + +#define AE_LBI(ae_ohba) ({ __builtin_xtensa_ae_lbi((ae_ohba)); }) + +#define 
AE_LBK(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lbk((__ars), (__art)); \ + }) + +#define AE_LBKI(ars, ae_ohba) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_lbki((__ars), (ae_ohba)); \ + }) + +#define AE_LBS(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lbs((__art)); \ + }) + +#define AE_LBSI(ae_ohba) ({ __builtin_xtensa_ae_lbsi((ae_ohba)); }) + +#define AE_LE16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LE32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LE64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAX32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_max32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAX64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_max64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAXABS32S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_maxabs32s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAXABS64S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_maxabs64s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MIN32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_min32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MIN64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_min64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MINABS32S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_minabs32s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MINABS64S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_minabs64s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MOV(ae_to_dr_v0) \ + ({ \ + ae_int64 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_mov((__ae_to_dr_v0)); \ + }) + +#define AE_MOVAD16_0(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 
__ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_0((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_1(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_1((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_2(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_2((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_3(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_3((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD32_H(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad32_h((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD32_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad32_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVALIGN(ae_uu_v) \ + ({ \ + ae_valign __ae_uu_v = (ae_uu_v); \ + __builtin_xtensa_ae_movalign((__ae_uu_v)); \ + }) + +#define AE_MOVDA16(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_movda16((__ars)); \ + }) + +#define AE_MOVDA16X2(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_movda16x2((__ars), (__art)); \ + }) + +#define AE_MOVDA32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_movda32((__ars)); \ + }) + +#define AE_MOVDA32X2(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_movda32x2((__ars), (__art)); \ + }) + +#define AE_MOVF16X4(ae_cmov_v, ae_cmov_v0, bt4) \ + ({ \ + ae_int16x4 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool4 __bt4 = (bt4); \ + __builtin_xtensa_ae_movf16x4((ae_int16x4 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt4)); \ + }) + +#define AE_MOVF32X2(ae_cmov_v, ae_cmov_v0, bt2) \ + ({ \ + ae_int32x2 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool2 __bt2 = (bt2); \ + __builtin_xtensa_ae_movf32x2((ae_int32x2 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt2)); \ + }) + +#define AE_MOVF64(ae_cmov_v, ae_cmov_v0, bt) \ + ({ \ + ae_int64 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool __bt = (bt); \ + __builtin_xtensa_ae_movf64((ae_int64 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt)); \ + }) + +#define AE_MOVI(movi_imm) ({ __builtin_xtensa_ae_movi((movi_imm)); }) + +#define AE_MOVT16X4(ae_cmov_v, ae_cmov_v0, bt4) \ + ({ \ + ae_int16x4 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool4 __bt4 = (bt4); \ + __builtin_xtensa_ae_movt16x4((ae_int16x4 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt4)); \ + }) + +#define AE_MOVT32X2(ae_cmov_v, ae_cmov_v0, bt2) \ + ({ \ + ae_int32x2 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool2 __bt2 = (bt2); \ + __builtin_xtensa_ae_movt32x2((ae_int32x2 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt2)); \ + }) + +#define AE_MOVT64(ae_cmov_v, ae_cmov_v0, bt) \ + ({ \ + ae_int64 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool __bt = (bt); \ + __builtin_xtensa_ae_movt64((ae_int64 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt)); \ + }) + +#define AE_MUL16X4(ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mul16x4((__ae_mul_d1), (__ae_mul_d0)); \ + }) + +#define AE_MUL32_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define 
AE_MUL32_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32U_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32u_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MUL32X16_L0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA16X4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mula16x4((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULA32_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 
__opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32U_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32u_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H3_S2(ae_mul_S2_q0, 
ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaad24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulaad24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaad24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h0_l1( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H0_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h0_l1_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h2_l3( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H2_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h2_l3_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), 
(__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_11_00(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_11_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_11_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_11_00_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_13_02(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_13_02( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_13_02_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_13_02_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_33_22(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_33_22( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_33_22_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_33_22_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaafd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaafd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = 
(opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h0_l1( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H0_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h2_l3( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H2_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAC24(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulac24((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULAC32X16_H(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulac32x16_h((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAC32X16_L(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 
= (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulac32x16_l((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAF16SS_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf16ss_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF16SS_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf16ss_00_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF16SS_10(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_10((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_11(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_11((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_20(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_20((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_21(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_21((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_22(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_22((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_30(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_30((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_31(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_31((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_32(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_32((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_33(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_33((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16X4SS(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mulaf16x4ss((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + 
(__ae_mul_d0)); \ + }) + +#define AE_MULAF32R_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32r_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32S_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32s_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, 
ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, 
ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16s_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF48Q32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16u_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF48Q32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16u_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFC24RA(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulafc24ra((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFC32X16RAS_H(opnd_ae_sem_mul_x4_q0, 
opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulafc32x16ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFC32X16RAS_L(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulafc32x16ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFD24X2_FIR_H(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd24x2_fir_h( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD24X2_FIR_L(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd24x2_fir_l( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_HH(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_hh( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_HL(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_hl( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_LH(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_lh( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_LL(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_ll( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFP24X2R(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp24x2r((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP24X2R_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 
__ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp24x2r_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP24X2RA(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp24x2ra( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP24X2RA_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp24x2ra_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + 
}) + +#define AE_MULAFP32X16X2RS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X2RAS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x2ras( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X2RS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x2rs( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAP24X2(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulap24x2((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULAP24X2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulap24x2_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAP32X16X2_H(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x16x2_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAP32X16X2_L(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x16x2_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAP32X2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x2((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define 
AE_MULAQ32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaq32sp16s_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAQ32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaq32sp16u_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULARFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mularfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULARFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mularfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_hh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_HH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_hh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_lh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_ll( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + 
__builtin_xtensa_ae_mulasd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasfd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasfd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MULASFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULC24(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulc24((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULC32X16_H(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulc32x16_h((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULC32X16_L(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulc32x16_l((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULF16SS_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf16ss_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF16SS_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf16ss_00_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF16SS_10(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_10((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_11(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_11((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_20(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_20((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_21(ae_mul_d0, 
ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_21((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_22(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_22((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_30(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_30((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_31(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_31((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_32(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_32((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_33(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_33((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16X4SS(ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mulf16x4ss((__ae_mul_d1), (__ae_mul_d0)); \ + }) + +#define AE_MULF32R_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32r_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32S_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32s_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); 
\ + __builtin_xtensa_ae_mulf32x16_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf48q32sp16s_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF48Q32SP16S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf48q32sp16s_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf48q32sp16u_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF48Q32SP16U_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf48q32sp16u_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFC24RA(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulfc24ra((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFC32X16RAS_H(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulfc32x16ras_h((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFC32X16RAS_L(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = 
(opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulfc32x16ras_l((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFD24X2_FIR_H(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd24x2_fir_h((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD24X2_FIR_L(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd24x2_fir_l((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_HH(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_hh((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_HL(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_hl((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_LH(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_lh((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_LL(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_ll((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFP16X4RAS(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulfp16x4ras((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULFP16X4S(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulfp16x4s((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULFP24X2R(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp24x2r((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP24X2R_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp24x2r_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP24X2RA(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp24x2ra((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP24X2RA_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp24x2ra_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MULFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RS_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RS_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X2RAS(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x2ras((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X2RS(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x2rs((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFQ32SP24S_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfq32sp24s_h_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFQ32SP24S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulfq32sp24s_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULP24X2(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulp24x2((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULP24X2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulp24x2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULP32X16X2_H(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x16x2_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULP32X16X2_L(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x16x2_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULP32X2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULQ32SP16S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulq32sp16s_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULQ32SP16U_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulq32sp16u_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULRFQ32SP24S_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulrfq32sp24s_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULRFQ32SP24S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulrfq32sp24s_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS16X4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_muls16x4((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULS32_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_lh((ae_int64 
*)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32f48p16s_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_HH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_hh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32f48p16s_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32f48p16s_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32U_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32u_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD32X16_H1_L0(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad32x16_h1_l0((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD32X16_H3_L2(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad32x16_h3_l2((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsafd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), 
(__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSAFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSAFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF16SS_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf16ss_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF16SS_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf16ss_00_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF16SS_10(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_10((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_11(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_11((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_20(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_20((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_21(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_21((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_22(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_22((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_30(ae_mul_q0, 
ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_30((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_31(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_31((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_32(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_32((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_33(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_33((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16X4SS(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mulsf16x4ss((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULSF32R_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32r_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32S_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32S_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_lh((ae_int64 
*)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32S_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + 
ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16s_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF48Q32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = 
(opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16u_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF48Q32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16u_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP24X2R(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp24x2r((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP24X2R_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp24x2r_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP24X2RA(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp24x2ra( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP24X2RA_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp24x2ra_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X2RAS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x2ras( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X2RS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x2rs( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSP24X2(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsp24x2((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULSP24X2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsp24x2_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSP32X16X2_H(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x16x2_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSP32X16X2_L(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x16x2_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSP32X2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x2((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSQ32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsq32sp16s_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSQ32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsq32sp16u_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSRFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsrfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSRFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsrfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_hh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_HH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_hh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_lh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 
__ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_ll( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_11_00(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ 
+ opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd16ss_11_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_11_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_11_00_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_13_02(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd16ss_13_02( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_13_02_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_13_02_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_33_22(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd16ss_33_22( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_33_22_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_33_22_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssfd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssfd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h1_l0( 
\ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaad24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaad24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h0_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H0_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = 
(opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h2_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H2_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD16SS_11_00(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_11_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_11_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_11_00_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD16SS_13_02(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_13_02((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_13_02_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_13_02_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD16SS_33_22(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_33_22((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_33_22_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_33_22_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaafd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaafd24_hl_lh((__ae_mul_d0), 
(__ae_mul_d1)); \ + }) + +#define AE_MULZAAFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h0_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H0_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h2_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H2_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 
= (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasfd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasfd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsad24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD32X16_H1_L0(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h1_l0((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD32X16_H3_L2(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h3_l2((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsafd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSAFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSAFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ 
+ ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_11_00(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_11_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_11_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_11_00_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_13_02(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_13_02((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_13_02_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_13_02_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_33_22(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_33_22((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_33_22_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + 
ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_33_22_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssfd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssfd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_NAND(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_nand((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_NEG16S(ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg16s((__ae_arth_v1)); \ + }) + +#define AE_NEG24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg24s((__ae_arth_v1)); \ + }) + +#define AE_NEG32(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg32((__ae_arth_v1)); \ + }) + +#define AE_NEG32S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg32s((__ae_arth_v1)); \ + }) + +#define AE_NEG64(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg64((__ae_arth_v1)); \ + }) + +#define AE_NEG64S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg64s((__ae_arth_v1)); \ + }) + +#define AE_NSA64(ae_dr_to_ar_v0) \ + ({ \ + ae_int64 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + 
__builtin_xtensa_ae_nsa64((__ae_dr_to_ar_v0)); \ + }) + +#define AE_NSAZ16_0(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_nsaz16_0((__ae_dr_to_ar_v0)); \ + }) + +#define AE_NSAZ32_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_nsaz32_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_OR(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_or((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_PKSR24(ae_pks_d, ae_pks_s, ae_imm2) \ + ({ \ + ae_int64 __ae_pks_s = (ae_pks_s); \ + __builtin_xtensa_ae_pksr24((ae_int32x2 *)&(ae_pks_d), (__ae_pks_s), \ + (ae_imm2)); \ + }) + +#define AE_PKSR32(ae_pks_d, ae_pks_s, ae_imm2) \ + ({ \ + ae_int64 __ae_pks_s = (ae_pks_s); \ + __builtin_xtensa_ae_pksr32((ae_int32x2 *)&(ae_pks_d), (__ae_pks_s), \ + (ae_imm2)); \ + }) + +#define AE_ROUND16X4F32SASYM(ae_arth_v1, ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_round16x4f32sasym((__ae_arth_v1), (__ae_arth_v0)); \ + }) + +#define AE_ROUND16X4F32SSYM(ae_arth_v1, ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_round16x4f32ssym((__ae_arth_v1), (__ae_arth_v0)); \ + }) + +#define AE_ROUND24X2F48SASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round24x2f48sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND24X2F48SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round24x2f48ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F48SASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f48sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F48SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f48ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F64SASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f64sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F64SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f64ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSP16F24ASYM(ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_roundsp16f24asym((__ae_arth_v0)); \ + }) + +#define AE_ROUNDSP16F24SYM(ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_roundsp16f24sym((__ae_arth_v0)); \ + }) + +#define AE_ROUNDSP16Q48X2ASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsp16q48x2asym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSP16Q48X2SYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + 
__builtin_xtensa_ae_roundsp16q48x2sym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSQ32F48ASYM(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsq32f48asym((__ae_arth_v1)); \ + }) + +#define AE_ROUNDSQ32F48SYM(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsq32f48sym((__ae_arth_v1)); \ + }) + +#define AE_S16_0_I(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_s16_0_i((__ae_ls_v), (__ars), (ae_immls16)); \ + }) + +#define AE_S16_0_IP(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16_0_ip((__ae_ls_v), (ae_int16 **)&(ars), \ + (ae_immls16)); \ + }) + +#define AE_S16_0_X(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16_0_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_xc((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16_0_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_xp((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16M_L_I(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_s16m_l_i((__ae_ls_v), (__ars), (ae_immls16)); \ + }) + +#define AE_S16M_L_IU(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16m_l_iu((__ae_ls_v), (ae_int16 **)&(ars), \ + (ae_immls16)); \ + }) + +#define AE_S16M_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16M_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_xc((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16M_L_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_xu((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16X2M_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16x2 *__ars = (ars); \ + __builtin_xtensa_ae_s16x2m_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S16X2M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x2m_iu((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S16X2M_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16X2M_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_xc((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S16X2M_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_xu((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S16X4_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16x4 *__ars = (ars); \ + 
__builtin_xtensa_ae_s16x4_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S16X4_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_ip((__ae_ls_v), (ae_int16x4 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S16X4_RIC(ae_ls_v, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_ric((__ae_ls_v), (ae_int16x4 **)&(ars)); \ + }) + +#define AE_S16X4_RIP(ae_ls_v, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_rip((__ae_ls_v), (ae_int16x4 **)&(ars)); \ + }) + +#define AE_S16X4_X(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16x4 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16X4_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_xc((__ae_ls_v), (ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_S16X4_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_xp((__ae_ls_v), (ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_S24RA64S_I(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s24ra64s_i((__ae_ls_v1), (__ars), (ae_immls32)); \ + }) + +#define AE_S24RA64S_IP(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s24ra64s_ip((__ae_ls_v1), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S24RA64S_X(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_x((__ae_ls_v1), (__ars), (__art)); \ + }) + +#define AE_S24RA64S_XC(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_xc((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S24RA64S_XP(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_xp((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S24X2RA64S_IP(ae_ls_v2, ae_ls_v1, ars) \ + ({ \ + ae_int64 __ae_ls_v2 = (ae_ls_v2); \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s24x2ra64s_ip((__ae_ls_v2), (__ae_ls_v1), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32_L_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32_l_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32_L_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32_l_ip((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_xc((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32_L_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_xp((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32F24_L_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + 
ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32f24_l_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32F24_L_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32f24_l_ip((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32F24_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32F24_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_xc((__ae_ls_v), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32F24_L_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_xp((__ae_ls_v), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32M_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32m_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32m_iu((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32M_X(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32M_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_xc((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32M_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_xu((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32RA64S_I(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32ra64s_i((__ae_ls_v1), (__ars), (ae_immls32)); \ + }) + +#define AE_S32RA64S_IP(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s32ra64s_ip((__ae_ls_v1), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32RA64S_X(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_x((__ae_ls_v1), (__ars), (__art)); \ + }) + +#define AE_S32RA64S_XC(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_xc((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32RA64S_XP(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_xp((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_s32x2_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S32X2_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_ip((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S32X2_RIC(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_ric((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + 
+#define AE_S32X2_RIP(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_rip((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32X2_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_xc((__ae_ls_v), (ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_S32X2_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_xp((__ae_ls_v), (ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_S32X2F24_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_s32x2f24_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S32X2F24_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2f24_ip((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S32X2F24_RIC(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2f24_ric((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2F24_RIP(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2f24_rip((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2F24_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32X2F24_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_xc((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2F24_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_xp((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2RA64S_IP(ae_ls_v2, ae_ls_v1, ars) \ + ({ \ + ae_int64 __ae_ls_v2 = (ae_ls_v2); \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s32x2ra64s_ip((__ae_ls_v2), (__ae_ls_v1), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S64_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int64 *__ars = (ars); \ + __builtin_xtensa_ae_s64_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S64_IP(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s64_ip((__ae_ls_v), (ae_int64 **)&(ars), \ + (ae_immls64)); \ + }) + +#define AE_S64_X(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int64 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s64_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S64_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s64_xc((__ae_ls_v), (ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_S64_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s64_xp((__ae_ls_v), (ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_SA16X4_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 
**)&(ars)); \ + }) + +#define AE_SA16X4_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA16X4_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA16X4_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA24_L_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA32X2_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + 
__builtin_xtensa_ae_sa32x2f24_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA64NEG_FP(ae_ls_su, ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_sa64neg_fp((ae_valign *)&(ae_ls_su), (__ars)); \ + }) + +#define AE_SA64POS_FP(ae_ls_su, ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_sa64pos_fp((ae_valign *)&(ae_ls_su), (__ars)); \ + }) + +#define AE_SALIGN64_I(ae_ls_su, ars, ae_immls64) \ + ({ \ + ae_valign __ae_ls_su = (ae_ls_su); \ + ae_valign *__ars = (ars); \ + __builtin_xtensa_ae_salign64_i((__ae_ls_su), (__ars), (ae_immls64)); \ + }) + +#define AE_SAT16X4(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat16x4((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SAT24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat24s((__ae_arth_v1)); \ + }) + +#define AE_SAT48S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat48s((__ae_arth_v1)); \ + }) + +#define AE_SATQ56S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_satq56s((__ae_arth_v1)); \ + }) + +#define AE_SB(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb((short **)&(ars), (__art)); \ + }) + +#define AE_SB_IC(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb_ic((short **)&(ars), (__art)); \ + }) + +#define AE_SB_IP(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb_ip((short **)&(ars), (__art)); \ + }) + +#define AE_SBF(ars) ({ __builtin_xtensa_ae_sbf((short **)&(ars)); }) + +#define AE_SBF_IC(ars) ({ __builtin_xtensa_ae_sbf_ic((short **)&(ars)); }) + +#define AE_SBF_IP(ars) ({ __builtin_xtensa_ae_sbf_ip((short **)&(ars)); }) + +#define AE_SBI(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sbi((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SBI_IC(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sbi_ic((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SBI_IP(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sbi_ip((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SEL16I(ae_dr_to_dr_v0, ae_dr_to_dr_v1, ae_selimm) \ + ({ \ + ae_int16x4 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int16x4 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_sel16i((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1), \ + (ae_selimm)); \ + }) + +#define AE_SEL16I_N(ae_dr_to_dr_v0, ae_dr_to_dr_v1, ae_selimm_N) \ + ({ \ + ae_int16x4 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int16x4 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_sel16i_n((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1), \ + (ae_selimm_N)); \ + }) + +#define AE_SEXT32(ae_dr_to_dr_v0, ae_opnd_tp7) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_sext32((__ae_dr_to_dr_v0), (ae_opnd_tp7)); \ + }) + +#define AE_SEXT32X2D16_10(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_sext32x2d16_10((__ae_to_dr_v0)); \ + }) + +#define AE_SEXT32X2D16_32(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + 
__builtin_xtensa_ae_sext32x2d16_32((__ae_to_dr_v0)); \ + }) + +#define AE_SHA32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sha32((__ars)); \ + }) + +#define AE_SHORTSWAP(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_shortswap((__ae_to_dr_v0)); \ + }) + +#define AE_SLAA16S(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa16s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA32S(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa32s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA64S(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa64s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAAQ56(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaaq56((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAI16S(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai16s((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SLAI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI24S(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai24s((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI32S(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai32s((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAI64S(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai64s((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAISQ56S(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slaisq56s((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas24((__ae_shift_d0)); \ + }) + +#define AE_SLAS24S(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas24s((__ae_shift_d0)); \ + }) + +#define AE_SLAS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas32((__ae_shift_d0)); \ + }) + +#define AE_SLAS32S(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas32s((__ae_shift_d0)); \ + }) + +#define AE_SLAS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas64((__ae_shift_d0)); \ + }) + +#define AE_SLAS64S(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = 
(ae_shift_d0); \ + __builtin_xtensa_ae_slas64s((__ae_shift_d0)); \ + }) + +#define AE_SLASQ56(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slasq56((__ae_shift_d0)); \ + }) + +#define AE_SLASSQ56S(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slassq56s((__ae_shift_d0)); \ + }) + +#define AE_SRA64_32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sra64_32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA16RS(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa16rs((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA16S(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa16s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32RS(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa32rs((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32S(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa32s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAI16(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai16((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SRAI16R(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai16r((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SRAI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI32R(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai32r((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SRAS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_sras24((__ae_shift_d0)); \ + }) + +#define AE_SRAS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_sras32((__ae_shift_d0)); \ + }) + +#define AE_SRAS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_sras64((__ae_shift_d0)); \ + }) + +#define AE_SRLA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_srla32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRLA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_srla64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRLI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); 
\ + __builtin_xtensa_ae_srli24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRLI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srli32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRLI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srli64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SRLS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls24((__ae_shift_d0)); \ + }) + +#define AE_SRLS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls32((__ae_shift_d0)); \ + }) + +#define AE_SRLS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls64((__ae_shift_d0)); \ + }) + +#define AE_SUB16(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub16((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB16S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub16s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB24S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub24s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB64(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub64((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB64S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub64s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUBADD32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_subadd32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUBADD32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_subadd32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_TRUNCA32F64S_L(ae_shift_d0, ae_shift_sd, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_trunca32f64s_l((__ae_shift_d0), (__ae_shift_sd), \ + (__ars)); \ + }) + +#define AE_TRUNCA32X2F64S(ae_shift_d0, ae_shift_sd, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_trunca32x2f64s((__ae_shift_d0), (__ae_shift_sd), \ + (__ars)); \ + }) + +#define AE_TRUNCI32F64S_L(ae_shift_d0, ae_shift_sd, ae_osa16) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + __builtin_xtensa_ae_trunci32f64s_l((__ae_shift_d0), (__ae_shift_sd), \ + (ae_osa16)); \ + }) + +#define 
AE_TRUNCI32X2F64S(ae_shift_d0, ae_shift_sd, ae_osa16) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + __builtin_xtensa_ae_trunci32x2f64s((__ae_shift_d0), (__ae_shift_sd), \ + (ae_osa16)); \ + }) + +#define AE_VLDL16C(ars) \ + ({ __builtin_xtensa_ae_vldl16c((const short **)&(ars)); }) + +#define AE_VLDL16C_IC(ars) \ + ({ __builtin_xtensa_ae_vldl16c_ic((const short **)&(ars)); }) + +#define AE_VLDL16C_IP(ars) \ + ({ __builtin_xtensa_ae_vldl16c_ip((const short **)&(ars)); }) + +#define AE_VLDL16T(ars) \ + ({ \ + short *__ars = (short *)(ars); \ + __builtin_xtensa_ae_vldl16t((__ars)); \ + }) + +#define AE_VLDL32T(ars) \ + ({ \ + int *__ars = (int *)(ars); \ + __builtin_xtensa_ae_vldl32t((__ars)); \ + }) + +#define AE_VLDSHT(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_vldsht((__art)); \ + }) + +#define AE_VLEL16T(br, art, ars) \ + ({ \ + short *__ars = (short *)(ars); \ + __builtin_xtensa_ae_vlel16t((xtbool *)&(br), (int *)&(art), (__ars)); \ + }) + +#define AE_VLEL32T(br, art, ars) \ + ({ \ + int *__ars = (int *)(ars); \ + __builtin_xtensa_ae_vlel32t((xtbool *)&(br), (int *)&(art), (__ars)); \ + }) + +#define AE_VLES16C(ars) ({ __builtin_xtensa_ae_vles16c((short **)&(ars)); }) + +#define AE_VLES16C_IC(ars) \ + ({ __builtin_xtensa_ae_vles16c_ic((short **)&(ars)); }) + +#define AE_VLES16C_IP(ars) \ + ({ __builtin_xtensa_ae_vles16c_ip((short **)&(ars)); }) + +#define AE_XOR(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_xor((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_ZALIGN64() ({ __builtin_xtensa_ae_zalign64(); }) + +#define RUR_AE_BITHEAD() ({ __builtin_xtensa_rur_ae_bithead(); }) + +#define RUR_AE_BITPTR() ({ __builtin_xtensa_rur_ae_bitptr(); }) + +#define RUR_AE_BITSUSED() ({ __builtin_xtensa_rur_ae_bitsused(); }) + +#define RUR_AE_CBEGIN0() ({ __builtin_xtensa_rur_ae_cbegin0(); }) + +#define RUR_AE_CEND0() ({ __builtin_xtensa_rur_ae_cend0(); }) + +#define RUR_AE_CW_SD_NO() ({ __builtin_xtensa_rur_ae_cw_sd_no(); }) + +#define RUR_AE_CWRAP() ({ __builtin_xtensa_rur_ae_cwrap(); }) + +#define RUR_AE_FIRST_TS() ({ __builtin_xtensa_rur_ae_first_ts(); }) + +#define RUR_AE_NEXTOFFSET() ({ __builtin_xtensa_rur_ae_nextoffset(); }) + +#define RUR_AE_OVERFLOW() ({ __builtin_xtensa_rur_ae_overflow(); }) + +#define RUR_AE_OVF_SAR() ({ __builtin_xtensa_rur_ae_ovf_sar(); }) + +#define RUR_AE_SAR() ({ __builtin_xtensa_rur_ae_sar(); }) + +#define RUR_AE_SEARCHDONE() ({ __builtin_xtensa_rur_ae_searchdone(); }) + +#define RUR_AE_TABLESIZE() ({ __builtin_xtensa_rur_ae_tablesize(); }) + +#define RUR_AE_TS_FTS_BU_BP() ({ __builtin_xtensa_rur_ae_ts_fts_bu_bp(); }) + +#define WUR_AE_BITHEAD(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bithead((__art)); \ + }) + +#define WUR_AE_BITPTR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bitptr((__art)); \ + }) + +#define WUR_AE_BITSUSED(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bitsused((__art)); \ + }) + +#define WUR_AE_CBEGIN0(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cbegin0((__art)); \ + }) + +#define WUR_AE_CEND0(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cend0((__art)); \ + }) + +#define WUR_AE_CW_SD_NO(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cw_sd_no((__art)); \ + }) + +#define WUR_AE_CWRAP(art) \ + ({ \ + int __art = 
(int)(art); \ + __builtin_xtensa_wur_ae_cwrap((__art)); \ + }) + +#define WUR_AE_FIRST_TS(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_first_ts((__art)); \ + }) + +#define WUR_AE_NEXTOFFSET(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_nextoffset((__art)); \ + }) + +#define WUR_AE_OVERFLOW(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_overflow((__art)); \ + }) + +#define WUR_AE_OVF_SAR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_ovf_sar((__art)); \ + }) + +#define WUR_AE_SAR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_sar((__art)); \ + }) + +#define WUR_AE_SEARCHDONE(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_searchdone((__art)); \ + }) + +#define WUR_AE_TABLESIZE(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_tablesize((__art)); \ + }) + +#define WUR_AE_TS_FTS_BU_BP(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_ts_fts_bu_bp((__art)); \ + }) + +#define XT_ABS_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_abs_s((__frs)); \ + }) + +#define XT_ADD_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_add_s((__frs), (__frt)); \ + }) + +#define XT_ADDEXP_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_addexp_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_ADDEXPM_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_addexpm_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_CEIL_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_ceil_s((__frs), (imm_t)); \ + }) + +#define XT_DIV0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_div0_s((__frs)); \ + }) + +#define XT_DIVN_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_divn_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_FLOAT_S(ars, imm_t) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_float_s((__ars), (imm_t)); \ + }) + +#define XT_FLOOR_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_floor_s((__frs), (imm_t)); \ + }) + +#define XT_LSI(ars, imm8x4) \ + ({ \ + xtfloat *__ars = (xtfloat *)(ars); \ + __builtin_xtensa_xt_lsi((__ars), (imm8x4)); \ + }) + +#define XT_LSIP(frt, ars, imm8x4) \ + ({ \ + __builtin_xtensa_xt_lsip((xtfloat *)&(frt), (const xtfloat **)&(ars), \ + (imm8x4)); \ + }) + +#define XT_LSX(ars, art) \ + ({ \ + xtfloat *__ars = (xtfloat *)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_lsx((__ars), (__art)); \ + }) + +#define XT_LSXP(frr, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_lsxp((xtfloat *)&(frr), (const xtfloat **)&(ars), \ + (__art)); \ + }) + +#define XT_MADD_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_madd_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MADDN_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_maddn_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MKDADJ_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mkdadj_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_MKSADJ_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mksadj_s((__frs)); \ + }) 
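(Note, not part of the generated header: the scalar floating-point wrappers above come in two shapes. Value-returning macros such as XT_ADD_S simply forward their operands, while macros such as XT_MADD_S and XT_LSIP take their first argument by address so the underlying builtin can update it in place. The sketch below is a minimal, hypothetical usage example under those assumptions; it presumes this header and its xtfloat type are in scope, and the names sum_sq4, cur, and acc are illustrative only.)

static inline float sum_sq4(const float *p) {
  const xtfloat *cur = (const xtfloat *)p; /* XT_LSIP advances this pointer          */
  xtfloat acc = XT_FLOAT_S(0, 0);          /* convert integer 0 to 0.0f              */
  for (int i = 0; i < 4; ++i) {
    xtfloat v;
    XT_LSIP(v, cur, 4);                    /* v = *cur, then cur += 4 bytes          */
    XT_MADD_S(acc, v, v);                  /* acc += v * v; acc is passed by address */
  }
  return acc;
}

The load/store offset (4 here) has to be a compile-time constant multiple of 4 in [0, 1020], and the conversion immediates must lie in [0, 15]; the Sema checks added later in this patch enforce both.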
+ +#define XT_MOV_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mov_s((__frs)); \ + }) + +#define XT_MOVEQZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_moveqz_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVF_S(frr, frs, bt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtbool __bt = (bt); \ + __builtin_xtensa_xt_movf_s((xtfloat *)&(frr), (__frs), (__bt)); \ + }) + +#define XT_MOVGEZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_movgez_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVLTZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_movltz_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVNEZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_movnez_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVT_S(frr, frs, bt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtbool __bt = (bt); \ + __builtin_xtensa_xt_movt_s((xtfloat *)&(frr), (__frs), (__bt)); \ + }) + +#define XT_MSUB_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_msub_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MUL_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_mul_s((__frs), (__frt)); \ + }) + +#define XT_NEG_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_neg_s((__frs)); \ + }) + +#define XT_NEXP01_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_nexp01_s((__frs)); \ + }) + +#define XT_OEQ_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_oeq_s((__frs), (__frt)); \ + }) + +#define XT_OLE_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ole_s((__frs), (__frt)); \ + }) + +#define XT_OLT_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_olt_s((__frs), (__frt)); \ + }) + +#define XT_RECIP0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_recip0_s((__frs)); \ + }) + +#define XT_RFR(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_rfr((__frs)); \ + }) + +#define XT_ROUND_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_round_s((__frs), (imm_t)); \ + }) + +#define XT_RSQRT0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_rsqrt0_s((__frs)); \ + }) + +#define XT_RUR_FCR() ({ __builtin_xtensa_xt_rur_fcr(); }) + +#define XT_RUR_FSR() ({ __builtin_xtensa_xt_rur_fsr(); }) + +#define XT_SQRT0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_sqrt0_s((__frs)); \ + }) + +#define XT_SSI(frt, ars, imm8x4) \ + ({ \ + xtfloat __frt = (xtfloat)(frt); \ + xtfloat *__ars = (xtfloat *)(ars); \ + __builtin_xtensa_xt_ssi((__frt), (__ars), (imm8x4)); \ + }) + +#define XT_SSIP(frt, ars, imm8x4) \ + ({ \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ssip((__frt), (xtfloat **)&(ars), (imm8x4)); \ + }) + +#define XT_SSX(frr, ars, art) \ + ({ \ + xtfloat __frr = (xtfloat)(frr); \ + xtfloat *__ars = (xtfloat *)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_ssx((__frr), 
(__ars), (__art)); \ + }) + +#define XT_SSXP(frr, ars, art) \ + ({ \ + xtfloat __frr = (xtfloat)(frr); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_ssxp((__frr), (xtfloat **)&(ars), (__art)); \ + }) + +#define XT_SUB_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_sub_s((__frs), (__frt)); \ + }) + +#define XT_TRUNC_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_trunc_s((__frs), (imm_t)); \ + }) + +#define XT_UEQ_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ueq_s((__frs), (__frt)); \ + }) + +#define XT_UFLOAT_S(ars, imm_t) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_ufloat_s((__ars), (imm_t)); \ + }) + +#define XT_ULE_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ule_s((__frs), (__frt)); \ + }) + +#define XT_ULT_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ult_s((__frs), (__frt)); \ + }) + +#define XT_UN_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_un_s((__frs), (__frt)); \ + }) + +#define XT_UTRUNC_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_utrunc_s((__frs), (imm_t)); \ + }) + +#define XT_WFR(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_wfr((__ars)); \ + }) + +#define XT_WUR_FCR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_wur_fcr((__art)); \ + }) + +#define XT_WUR_FSR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_wur_fsr((__art)); \ + }) + +#endif /* __XTENSAHIFI3INTRIN_H */ diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 3cab466759d9f..0a1ba6943f169 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -5422,6 +5422,7 @@ bool Sema::CheckXtensaBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { +#include "clang/Basic/XtensaSemaCheck.inc" default: return false; case Xtensa::BI__builtin_xtensa_mul_ad_ll: @@ -5491,6 +5492,25 @@ bool Sema::CheckXtensaBuiltinFunctionCall(unsigned BuiltinID, return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 1) || SemaBuiltinConstantArgRange(TheCall, 3, 2, 3); + case Xtensa::BI__builtin_xtensa_xt_trunc_s: + case Xtensa::BI__builtin_xtensa_xt_utrunc_s: + case Xtensa::BI__builtin_xtensa_xt_float_s: + case Xtensa::BI__builtin_xtensa_xt_ufloat_s: + case Xtensa::BI__builtin_xtensa_xt_ceil_s: + case Xtensa::BI__builtin_xtensa_xt_floor_s: + case Xtensa::BI__builtin_xtensa_xt_round_s: + i = 1; + l = 0; + u = 15; + break; + case Xtensa::BI__builtin_xtensa_xt_lsi: + case Xtensa::BI__builtin_xtensa_xt_lsip: + return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1020) || + SemaBuiltinConstantArgMultiple(TheCall, 1, 4); + case Xtensa::BI__builtin_xtensa_xt_ssi: + case Xtensa::BI__builtin_xtensa_xt_ssip: + return SemaBuiltinConstantArgRange(TheCall, 2, 0, 1020) || + SemaBuiltinConstantArgMultiple(TheCall, 2, 4); case Xtensa::BI__builtin_xtensa_ee_andq: case Xtensa::BI__builtin_xtensa_ee_cmul_s16: case Xtensa::BI__builtin_xtensa_ee_fft_cmul_s16_st_xp: @@ -5719,10 +5739,51 @@ bool Sema::CheckXtensaBuiltinFunctionCall(unsigned BuiltinID, SemaBuiltinConstantArgRange(TheCall, 3, 0, 7) && 
SemaBuiltinConstantArgRange(TheCall, 4, 0, 7) && SemaBuiltinConstantArgRange(TheCall, 5, 0, 7); + case Xtensa::BI__builtin_xtensa_ae_int32x2: + case Xtensa::BI__builtin_xtensa_ae_int32: + return SemaBuiltinXtensaConversion(BuiltinID, TheCall); } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } +bool Sema::SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCall) { + unsigned MaxElems; + switch (BuiltinID) { + case Xtensa::BI__builtin_xtensa_ae_int32x2: + MaxElems = 2; + break; + case Xtensa::BI__builtin_xtensa_ae_int32: + MaxElems = 1; + break; + default: + llvm_unreachable("Unknown intrinsic ID"); + } + if (checkArgCount(*this, TheCall, 1)) + return true; + Expr *Arg = TheCall->getArg(0); + QualType QT = Arg->getType(); + if (auto *VecTy = QT->getAs<VectorType>()) { + unsigned NumEl = VecTy->getNumElements(); + QualType ElType = VecTy->getElementType(); + unsigned ElWidth = Context.getIntWidth(ElType); + QualType VecType = Context.getVectorType(Context.IntTy, MaxElems, + VectorType::GenericVector); + if (ElWidth != 32 || NumEl > MaxElems) + return Diag(TheCall->getBeginLoc(), + diag::err_typecheck_convert_incompatible) + << QT << VecType << 1 << 0 << 0; + return false; + } else { + if (!QT->isIntegerType()) + return Diag(TheCall->getBeginLoc(), + diag::err_typecheck_convert_incompatible) + << QT << Context.IntTy << 1 << 0 << 0; + + return false; + } + return false; +} + void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) { const TargetInfo &TI = Context.getTargetInfo(); // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at @@ -5756,7 +5817,6 @@ bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, case NVPTX::BI__nvvm_cp_async_cg_shared_global_16: return checkArgCountAtMost(*this, TheCall, 3); } - return false; } diff --git a/clang/test/CodeGen/xtensa-abi.c b/clang/test/CodeGen/Xtensa/xtensa-abi.c similarity index 83% rename from clang/test/CodeGen/xtensa-abi.c rename to clang/test/CodeGen/Xtensa/xtensa-abi.c index e858f8e88044d..297df864abc88 100644 --- a/clang/test/CodeGen/xtensa-abi.c +++ b/clang/test/CodeGen/Xtensa/xtensa-abi.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + #define __malloc_like __attribute__((__malloc__)) char *bufalloc () __malloc_like ;//__result_use_check; extern void* malloc (unsigned size); @@ -26,3 +28,7 @@ void callee_struct_a16b_2(struct S16 a, int b) {} void callee_struct_a16b_3(int a, struct S16 b) {} // CHECK: define dso_local void @callee_struct_a16b_3(i32 noundef %a, ptr noundef byval(%struct.S16) align 16 %b) + +xtbool test_xtbool(xtbool a) {} + +// CHECK: define dso_local <1 x i1> @test_xtbool(<1 x i1> noundef %a) diff --git a/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c new file mode 100644 index 0000000000000..150eface40a0b --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c @@ -0,0 +1,48 @@ +// RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + +float test_float_s(int a) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.float.s(i32 %{{.*}}, i32 1) + return __builtin_xtensa_xt_float_s(a, 1); +} + +float test_ufloat_s(int a) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.ufloat.s(i32 %{{.*}}, i32 1) + return __builtin_xtensa_xt_ufloat_s(a, 1); +} + +int test_trunc_s(float a) { + // CHECK: %{{.*}} = call i32 
@llvm.xtensa.xt.trunc.s(float %{{.*}}, i32 1) + return __builtin_xtensa_xt_trunc_s(a, 1); +} + +int test_utrunc_s(float a) { + // CHECK: %{{.*}} = call i32 @llvm.xtensa.xt.utrunc.s(float %{{.*}}, i32 1) + return __builtin_xtensa_xt_utrunc_s(a, 1); +} + +float test_add_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.add.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_add_s(a, b); +} + +float test_sub_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.sub.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_sub_s(a, b); +} + +float test_mul_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.mul.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_mul_s(a, b); +} + +float test_xt_lsip(float * a0) { + // CHECK: %{{.*}} = call { float, ptr } @llvm.xtensa.xt.lsip(ptr %{{.*}}, i32 0) + return __builtin_xtensa_xt_lsip(&a0, 0); +} + +float test_xt_lsxp(float * a0, int a1) { + // CHECK: %{{.*}} = call { float, ptr } @llvm.xtensa.xt.lsxp(ptr %{{.*}}, i32 %{{.*}}) + return __builtin_xtensa_xt_lsxp(&a0, a1); +} diff --git a/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c new file mode 100644 index 0000000000000..2b8de3d7b6d33 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c @@ -0,0 +1,88 @@ +# Fails on Windows due to an incorrectly built command line: `'(': command not found`, so it is disabled there temporarily +# UNSUPPORTED: system-windows + +# RUN: python3 %s > %t && ( %clang_cc1 -Dxtfloat=float -O0 -triple=xtensa %t -o - -emit-llvm | FileCheck %t ) + +FIXTURES = [ +('xtfloat', 'xt_addexp_s', ['xtfloat', 'xtfloat']) , +('xtfloat', 'xt_addexpm_s', ['xtfloat', 'xtfloat']) , +('int', 'xt_ceil_s', ['xtfloat', 0]) , +('xtfloat', 'xt_div0_s', ['xtfloat']) , +('xtfloat', 'xt_divn_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('int', 'xt_floor_s', ['xtfloat', 0]) , +('xtfloat', 'xt_lsi', ['xtfloat*', 0]) , +('xtfloat', 'xt_lsx', ['xtfloat*', 'int']) , +('xtfloat', 'xt_madd_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_maddn_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_mkdadj_s', ['xtfloat', 'xtfloat']) , +('xtfloat', 'xt_mksadj_s', ['xtfloat']) , +('xtfloat', 'xt_mov_s', ['xtfloat']) , +('xtfloat', 'xt_moveqz_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movgez_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movltz_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movnez_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_msub_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_neg_s', ['xtfloat']) , +('xtfloat', 'xt_nexp01_s', ['xtfloat']) , +('xtfloat', 'xt_recip0_s', ['xtfloat']) , +('int', 'xt_rfr', ['xtfloat']) , +('int', 'xt_round_s', ['xtfloat', 0]) , +('xtfloat', 'xt_rsqrt0_s', ['xtfloat']) , +('int', 'xt_rur_fcr', []) , +('int', 'xt_rur_fsr', []) , +('xtfloat', 'xt_sqrt0_s', ['xtfloat']) , +('void', 'xt_ssi', ['xtfloat', 'xtfloat*', 0]) , +('xtfloat*', 'xt_ssip', ['xtfloat', 'xtfloat*', 0]) , +('void', 'xt_ssx', ['xtfloat', 'xtfloat*', 'int']) , +('xtfloat*', 'xt_ssxp', ['xtfloat', 'xtfloat*', 'int']) , +('xtfloat', 'xt_wfr', ['int']) , +('void', 'xt_wur_fcr', ['int']) , +('void', 'xt_wur_fsr', ['int']) , +] + +from dataclasses import dataclass + +TYPES = { + 'xtfloat' : 'float', + 'int' : 'i32', + 'void':'void' +} + +def ctype2llvm(typ): + if '*' in typ: + return 'ptr' + else: + return TYPES[typ] + + +template = """ +{ret} test_{fun}({fun_args}) {{ + // CHECK: {assign} call {llvm_ret} 
@llvm.xtensa.{builtin}({llvm_args}) + return __builtin_xtensa_{fun}({call_args}); +}} +""" + +@dataclass +class F: + ret: str + fun : str + args: [str] + #builtin + #llvm_ret + #llvm_args + #call_args + +for f in FIXTURES: + f = F(*f) + args = f.args + f.fun_args = ", ".join( + ['%s a%d' % (a,i) for i,a, in enumerate(args) if isinstance(a,str)]) + f.builtin = f.fun.replace('_','.') + f.llvm_args = ", ".join( + [('%s {{.*}}' % ctype2llvm(a)) if isinstance(a,str) else ('i32 %d' % a) + for i,a, in enumerate(args)]) + f.call_args = ", ".join(['a%d' % i if isinstance(a,str) else str(a) + for i,a in enumerate(args)]) + f.llvm_ret = ctype2llvm(f.ret) + f.assign = '' if f.ret == 'void' else '{{.*}} =' + print(template.format(**f.__dict__)) diff --git a/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c b/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c new file mode 100644 index 0000000000000..506e785717e6d --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c @@ -0,0 +1,48 @@ +// RUN: split-file %s %t +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/correct.c | FileCheck %t/correct.c +// RUN: not %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/bad_vec.c 2>&1 | FileCheck %t/bad_vec.c + +//--- correct.c + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); + +ae_int32x2 test_ae_int32x2_from_int(int a) { +// CHECK-LABEL: @test_ae_int32x2_from_int(i +// CHECK: %[[INS:.*]] = insertelement <2 x i32> poison, i32 %{{.*}}, i64 0 +// CHECK: %[[SHUF:.*]] = shufflevector <2 x i32> %[[INS]], <2 x i32> poison, <2 x i32> zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_ae_int32(ae_int32 a) { +// CHECK-LABEL: @test_ae_int32x2_from_ae_int32( +// CHECK: %[[SHUF:.*]] = shufflevector <1 x i32> %{{.*}}, <1 x i32> poison, <2 x i32> zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_ae_int32x2(ae_int32x2 a) { +// CHECK: {{.*}}<2 x i32> @test_ae_int32x2_from_ae_int32x2(<2 x i32>{{.*}} %[[A:.*]]) +// CHECK: ret <2 x i32> %[[A]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_short(short a) { +// CHECK-LABEL: @test_ae_int32x2_from_short( +// CHECK: %[[SEXT:.*]] = sext i16 %{{.*}} to i32 +// CHECK: %[[INS:.*]] = insertelement <2 x i32> poison, i32 %[[SEXT]], i64 0 +// CHECK: %[[SHUF:.*]] = shufflevector <2 x i32> %[[INS]], <2 x i32> poison, <2 x i32> zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +//--- bad_vec.c + +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); + +ae_int32x2 test_ae_int32x2_from_bad_vec(ae_int16x4 a) { +// CHECK: error: passing 'ae_int16x4' {{.*}} to parameter of incompatible type +return __builtin_xtensa_ae_int32x2(a); +} diff --git a/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c new file mode 100644 index 0000000000000..13b07f3a5cc74 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c @@ -0,0 +1,21408 @@ +// RUN: split-file %s %t +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs16s.c | FileCheck %t/ae_abs16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs24s.c | FileCheck %t/ae_abs24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs32.c | 
FileCheck %t/ae_abs32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs32s.c | FileCheck %t/ae_abs32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs64.c | FileCheck %t/ae_abs64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs64s.c | FileCheck %t/ae_abs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add16.c | FileCheck %t/ae_add16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add16s.c | FileCheck %t/ae_add16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add24s.c | FileCheck %t/ae_add24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32.c | FileCheck %t/ae_add32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32_hl_lh.c | FileCheck %t/ae_add32_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32s.c | FileCheck %t/ae_add32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add64.c | FileCheck %t/ae_add64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add64s.c | FileCheck %t/ae_add64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addbrba32.c | FileCheck %t/ae_addbrba32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addsub32.c | FileCheck %t/ae_addsub32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addsub32s.c | FileCheck %t/ae_addsub32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_and.c | FileCheck %t/ae_and.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt32x2f16_10.c | FileCheck %t/ae_cvt32x2f16_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt32x2f16_32.c | FileCheck %t/ae_cvt32x2f16_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt48a32.c | FileCheck %t/ae_cvt48a32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt64a32.c | FileCheck %t/ae_cvt64a32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt64f32_h.c | FileCheck %t/ae_cvt64f32_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvta32f24s_h.c | FileCheck %t/ae_cvta32f24s_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvta32f24s_l.c | FileCheck %t/ae_cvta32f24s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56a32s.c | FileCheck %t/ae_cvtq56a32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56p32s_h.c | FileCheck %t/ae_cvtq56p32s_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56p32s_l.c | FileCheck %t/ae_cvtq56p32s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db.c | FileCheck %t/ae_db.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db_ic.c | FileCheck %t/ae_db_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db_ip.c | FileCheck %t/ae_db_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi.c | FileCheck %t/ae_dbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi_ic.c | FileCheck %t/ae_dbi_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi_ip.c | FileCheck %t/ae_dbi_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_div64d32_h.c | FileCheck %t/ae_div64d32_h.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_div64d32_l.c | FileCheck %t/ae_div64d32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq16.c | FileCheck %t/ae_eq16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq32.c | FileCheck %t/ae_eq32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq64.c | FileCheck %t/ae_eq64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_i.c | FileCheck %t/ae_l16_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_ip.c | FileCheck %t/ae_l16_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_x.c | FileCheck %t/ae_l16_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_xc.c | FileCheck %t/ae_l16_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_xp.c | FileCheck %t/ae_l16_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_i.c | FileCheck %t/ae_l16m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_iu.c | FileCheck %t/ae_l16m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_x.c | FileCheck %t/ae_l16m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_xc.c | FileCheck %t/ae_l16m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_xu.c | FileCheck %t/ae_l16m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_i.c | FileCheck %t/ae_l16x2m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_iu.c | FileCheck %t/ae_l16x2m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_x.c | FileCheck %t/ae_l16x2m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_xc.c | FileCheck %t/ae_l16x2m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_xu.c | FileCheck %t/ae_l16x2m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_i.c | FileCheck %t/ae_l16x4_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_ip.c | FileCheck %t/ae_l16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_ric.c | FileCheck %t/ae_l16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_rip.c | FileCheck %t/ae_l16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_x.c | FileCheck %t/ae_l16x4_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_xc.c | FileCheck %t/ae_l16x4_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_xp.c | FileCheck %t/ae_l16x4_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_i.c | FileCheck %t/ae_l32_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_ip.c | FileCheck %t/ae_l32_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_x.c | FileCheck %t/ae_l32_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_xc.c | FileCheck %t/ae_l32_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_xp.c | FileCheck %t/ae_l32_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_i.c | FileCheck %t/ae_l32f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_ip.c | FileCheck %t/ae_l32f24_ip.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_x.c | FileCheck %t/ae_l32f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_xc.c | FileCheck %t/ae_l32f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_xp.c | FileCheck %t/ae_l32f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_i.c | FileCheck %t/ae_l32m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_iu.c | FileCheck %t/ae_l32m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_x.c | FileCheck %t/ae_l32m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_xc.c | FileCheck %t/ae_l32m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_xu.c | FileCheck %t/ae_l32m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_i.c | FileCheck %t/ae_l32x2_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_ip.c | FileCheck %t/ae_l32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_ric.c | FileCheck %t/ae_l32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_rip.c | FileCheck %t/ae_l32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_x.c | FileCheck %t/ae_l32x2_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_xc.c | FileCheck %t/ae_l32x2_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_xp.c | FileCheck %t/ae_l32x2_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_i.c | FileCheck %t/ae_l32x2f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_ip.c | FileCheck %t/ae_l32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_ric.c | FileCheck %t/ae_l32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_rip.c | FileCheck %t/ae_l32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_x.c | FileCheck %t/ae_l32x2f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_xc.c | FileCheck %t/ae_l32x2f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_xp.c | FileCheck %t/ae_l32x2f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_i.c | FileCheck %t/ae_l64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_ip.c | FileCheck %t/ae_l64_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_x.c | FileCheck %t/ae_l64_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_xc.c | FileCheck %t/ae_l64_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_xp.c | FileCheck %t/ae_l64_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ic.c | FileCheck %t/ae_la16x4_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ip.c | FileCheck %t/ae_la16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ric.c | FileCheck %t/ae_la16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_rip.c | FileCheck %t/ae_la16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4neg_pc.c | FileCheck %t/ae_la16x4neg_pc.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4pos_pc.c | FileCheck %t/ae_la16x4pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ic.c | FileCheck %t/ae_la24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ip.c | FileCheck %t/ae_la24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ric.c | FileCheck %t/ae_la24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_rip.c | FileCheck %t/ae_la24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24neg_pc.c | FileCheck %t/ae_la24neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24pos_pc.c | FileCheck %t/ae_la24pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ic.c | FileCheck %t/ae_la24x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ip.c | FileCheck %t/ae_la24x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ric.c | FileCheck %t/ae_la24x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_rip.c | FileCheck %t/ae_la24x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2neg_pc.c | FileCheck %t/ae_la24x2neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2pos_pc.c | FileCheck %t/ae_la24x2pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ic.c | FileCheck %t/ae_la32x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ip.c | FileCheck %t/ae_la32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ric.c | FileCheck %t/ae_la32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_rip.c | FileCheck %t/ae_la32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ic.c | FileCheck %t/ae_la32x2f24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ip.c | FileCheck %t/ae_la32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ric.c | FileCheck %t/ae_la32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_rip.c | FileCheck %t/ae_la32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2neg_pc.c | FileCheck %t/ae_la32x2neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2pos_pc.c | FileCheck %t/ae_la32x2pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la64_pp.c | FileCheck %t/ae_la64_pp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lalign64_i.c | FileCheck %t/ae_lalign64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lb.c | FileCheck %t/ae_lb.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbi.c | FileCheck %t/ae_lbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbk.c | FileCheck %t/ae_lbk.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbki.c | FileCheck %t/ae_lbki.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbs.c | FileCheck %t/ae_lbs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbsi.c | FileCheck %t/ae_lbsi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le16.c | FileCheck %t/ae_le16.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le32.c | FileCheck %t/ae_le32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le64.c | FileCheck %t/ae_le64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt16.c | FileCheck %t/ae_lt16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt32.c | FileCheck %t/ae_lt32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt64.c | FileCheck %t/ae_lt64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_max32.c | FileCheck %t/ae_max32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_max64.c | FileCheck %t/ae_max64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_maxabs32s.c | FileCheck %t/ae_maxabs32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_maxabs64s.c | FileCheck %t/ae_maxabs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_min32.c | FileCheck %t/ae_min32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_min64.c | FileCheck %t/ae_min64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_minabs32s.c | FileCheck %t/ae_minabs32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_minabs64s.c | FileCheck %t/ae_minabs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mov.c | FileCheck %t/ae_mov.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_0.c | FileCheck %t/ae_movad16_0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_1.c | FileCheck %t/ae_movad16_1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_2.c | FileCheck %t/ae_movad16_2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_3.c | FileCheck %t/ae_movad16_3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad32_h.c | FileCheck %t/ae_movad32_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad32_l.c | FileCheck %t/ae_movad32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movalign.c | FileCheck %t/ae_movalign.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda16.c | FileCheck %t/ae_movda16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda16x2.c | FileCheck %t/ae_movda16x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda32.c | FileCheck %t/ae_movda32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda32x2.c | FileCheck %t/ae_movda32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movf16x4.c | FileCheck %t/ae_movf16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movf32x2.c | FileCheck %t/ae_movf32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movf64.c | FileCheck %t/ae_movf64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movi.c | FileCheck %t/ae_movi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt16x4.c | FileCheck %t/ae_movt16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt32x2.c | FileCheck %t/ae_movt32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt64.c | FileCheck %t/ae_movt64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul16x4.c | FileCheck %t/ae_mul16x4.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_hh.c | FileCheck %t/ae_mul32_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_lh.c | FileCheck %t/ae_mul32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_ll.c | FileCheck %t/ae_mul32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_ll_s2.c | FileCheck %t/ae_mul32_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32u_ll.c | FileCheck %t/ae_mul32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h0.c | FileCheck %t/ae_mul32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h0_s2.c | FileCheck %t/ae_mul32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h1.c | FileCheck %t/ae_mul32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h1_s2.c | FileCheck %t/ae_mul32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h2.c | FileCheck %t/ae_mul32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h2_s2.c | FileCheck %t/ae_mul32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h3.c | FileCheck %t/ae_mul32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h3_s2.c | FileCheck %t/ae_mul32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l0.c | FileCheck %t/ae_mul32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l0_s2.c | FileCheck %t/ae_mul32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l1.c | FileCheck %t/ae_mul32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l1_s2.c | FileCheck %t/ae_mul32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l2.c | FileCheck %t/ae_mul32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l2_s2.c | FileCheck %t/ae_mul32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l3.c | FileCheck %t/ae_mul32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l3_s2.c | FileCheck %t/ae_mul32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula16x4.c | FileCheck %t/ae_mula16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_hh.c | FileCheck %t/ae_mula32_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_lh.c | FileCheck %t/ae_mula32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_ll.c | FileCheck %t/ae_mula32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_ll_s2.c | FileCheck %t/ae_mula32_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32u_ll.c | FileCheck %t/ae_mula32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h0.c | FileCheck %t/ae_mula32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h0_s2.c | FileCheck %t/ae_mula32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h1.c | FileCheck %t/ae_mula32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mula32x16_h1_s2.c | FileCheck %t/ae_mula32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h2.c | FileCheck %t/ae_mula32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h2_s2.c | FileCheck %t/ae_mula32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h3.c | FileCheck %t/ae_mula32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h3_s2.c | FileCheck %t/ae_mula32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l0.c | FileCheck %t/ae_mula32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l0_s2.c | FileCheck %t/ae_mula32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l1.c | FileCheck %t/ae_mula32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l1_s2.c | FileCheck %t/ae_mula32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l2.c | FileCheck %t/ae_mula32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l2_s2.c | FileCheck %t/ae_mula32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l3.c | FileCheck %t/ae_mula32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l3_s2.c | FileCheck %t/ae_mula32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hh_ll.c | FileCheck %t/ae_mulaad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hh_ll_s2.c | FileCheck %t/ae_mulaad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hl_lh.c | FileCheck %t/ae_mulaad24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hl_lh_s2.c | FileCheck %t/ae_mulaad24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h0_l1.c | FileCheck %t/ae_mulaad32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h0_l1_s2.c | FileCheck %t/ae_mulaad32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h1_l0.c | FileCheck %t/ae_mulaad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h1_l0_s2.c | FileCheck %t/ae_mulaad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h2_l3.c | FileCheck %t/ae_mulaad32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h2_l3_s2.c | FileCheck %t/ae_mulaad32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h3_l2.c | FileCheck %t/ae_mulaad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h3_l2_s2.c | FileCheck %t/ae_mulaad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_11_00.c | FileCheck %t/ae_mulaafd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_11_00_s2.c | FileCheck %t/ae_mulaafd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_13_02.c | FileCheck %t/ae_mulaafd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_13_02_s2.c | FileCheck 
%t/ae_mulaafd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_33_22.c | FileCheck %t/ae_mulaafd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_33_22_s2.c | FileCheck %t/ae_mulaafd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hh_ll.c | FileCheck %t/ae_mulaafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hh_ll_s2.c | FileCheck %t/ae_mulaafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hl_lh.c | FileCheck %t/ae_mulaafd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hl_lh_s2.c | FileCheck %t/ae_mulaafd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h0_l1.c | FileCheck %t/ae_mulaafd32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h0_l1_s2.c | FileCheck %t/ae_mulaafd32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h1_l0.c | FileCheck %t/ae_mulaafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulaafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h2_l3.c | FileCheck %t/ae_mulaafd32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h2_l3_s2.c | FileCheck %t/ae_mulaafd32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h3_l2.c | FileCheck %t/ae_mulaafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulaafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulac24.c | FileCheck %t/ae_mulac24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulac32x16_h.c | FileCheck %t/ae_mulac32x16_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulac32x16_l.c | FileCheck %t/ae_mulac32x16_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_00.c | FileCheck %t/ae_mulaf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_00_s2.c | FileCheck %t/ae_mulaf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_10.c | FileCheck %t/ae_mulaf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_11.c | FileCheck %t/ae_mulaf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_20.c | FileCheck %t/ae_mulaf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_21.c | FileCheck %t/ae_mulaf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_22.c | FileCheck %t/ae_mulaf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_30.c | FileCheck %t/ae_mulaf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_31.c | FileCheck %t/ae_mulaf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_32.c | FileCheck %t/ae_mulaf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_33.c | FileCheck %t/ae_mulaf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_mulaf16x4ss.c | FileCheck %t/ae_mulaf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_hh.c | FileCheck %t/ae_mulaf32r_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_lh.c | FileCheck %t/ae_mulaf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_ll.c | FileCheck %t/ae_mulaf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_ll_s2.c | FileCheck %t/ae_mulaf32r_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_hh.c | FileCheck %t/ae_mulaf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_lh.c | FileCheck %t/ae_mulaf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_ll.c | FileCheck %t/ae_mulaf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_ll_s2.c | FileCheck %t/ae_mulaf32s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h0.c | FileCheck %t/ae_mulaf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h0_s2.c | FileCheck %t/ae_mulaf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h1.c | FileCheck %t/ae_mulaf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h1_s2.c | FileCheck %t/ae_mulaf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h2.c | FileCheck %t/ae_mulaf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h2_s2.c | FileCheck %t/ae_mulaf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h3.c | FileCheck %t/ae_mulaf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h3_s2.c | FileCheck %t/ae_mulaf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l0.c | FileCheck %t/ae_mulaf32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l0_s2.c | FileCheck %t/ae_mulaf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l1.c | FileCheck %t/ae_mulaf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l1_s2.c | FileCheck %t/ae_mulaf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l2.c | FileCheck %t/ae_mulaf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l2_s2.c | FileCheck %t/ae_mulaf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l3.c | FileCheck %t/ae_mulaf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l3_s2.c | FileCheck %t/ae_mulaf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16s_l.c | FileCheck %t/ae_mulaf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16s_l_s2.c | FileCheck %t/ae_mulaf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16u_l.c | FileCheck %t/ae_mulaf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16u_l_s2.c | FileCheck %t/ae_mulaf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_mulafc24ra.c | FileCheck %t/ae_mulafc24ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafc32x16ras_h.c | FileCheck %t/ae_mulafc32x16ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafc32x16ras_l.c | FileCheck %t/ae_mulafc32x16ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd24x2_fir_h.c | FileCheck %t/ae_mulafd24x2_fir_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd24x2_fir_l.c | FileCheck %t/ae_mulafd24x2_fir_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_hh.c | FileCheck %t/ae_mulafd32x16x2_fir_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_hl.c | FileCheck %t/ae_mulafd32x16x2_fir_hl.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_lh.c | FileCheck %t/ae_mulafd32x16x2_fir_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_ll.c | FileCheck %t/ae_mulafd32x16x2_fir_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2r.c | FileCheck %t/ae_mulafp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2r_s2.c | FileCheck %t/ae_mulafp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2ra.c | FileCheck %t/ae_mulafp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2ra_s2.c | FileCheck %t/ae_mulafp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_h.c | FileCheck %t/ae_mulafp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_h_s2.c | FileCheck %t/ae_mulafp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_l.c | FileCheck %t/ae_mulafp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_l_s2.c | FileCheck %t/ae_mulafp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_h.c | FileCheck %t/ae_mulafp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_h_s2.c | FileCheck %t/ae_mulafp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_l.c | FileCheck %t/ae_mulafp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_l_s2.c | FileCheck %t/ae_mulafp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x2ras.c | FileCheck %t/ae_mulafp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x2rs.c | FileCheck %t/ae_mulafp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafq32sp24s_h_s2.c | FileCheck %t/ae_mulafq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafq32sp24s_l_s2.c | FileCheck %t/ae_mulafq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap24x2.c | FileCheck %t/ae_mulap24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap24x2_s2.c | FileCheck %t/ae_mulap24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap32x16x2_h.c | FileCheck %t/ae_mulap32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 
-o - %t/ae_mulap32x16x2_l.c | FileCheck %t/ae_mulap32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap32x2.c | FileCheck %t/ae_mulap32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaq32sp16s_l_s2.c | FileCheck %t/ae_mulaq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaq32sp16u_l_s2.c | FileCheck %t/ae_mulaq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mularfq32sp24s_h_s2.c | FileCheck %t/ae_mularfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mularfq32sp24s_l_s2.c | FileCheck %t/ae_mularfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_hh.c | FileCheck %t/ae_mulas32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_hh_s2.c | FileCheck %t/ae_mulas32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_lh.c | FileCheck %t/ae_mulas32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_lh_s2.c | FileCheck %t/ae_mulas32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_ll.c | FileCheck %t/ae_mulas32f48p16s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_ll_s2.c | FileCheck %t/ae_mulas32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hh_ll.c | FileCheck %t/ae_mulasd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hh_ll_s2.c | FileCheck %t/ae_mulasd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hl_lh.c | FileCheck %t/ae_mulasd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hl_lh_s2.c | FileCheck %t/ae_mulasd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h1_l0.c | FileCheck %t/ae_mulasd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h1_l0_s2.c | FileCheck %t/ae_mulasd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h3_l2.c | FileCheck %t/ae_mulasd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h3_l2_s2.c | FileCheck %t/ae_mulasd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hh_ll.c | FileCheck %t/ae_mulasfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hh_ll_s2.c | FileCheck %t/ae_mulasfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hl_lh.c | FileCheck %t/ae_mulasfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hl_lh_s2.c | FileCheck %t/ae_mulasfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h1_l0.c | FileCheck %t/ae_mulasfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulasfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h3_l2.c | FileCheck %t/ae_mulasfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulasfd32x16_h3_l2_s2.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc24.c | FileCheck %t/ae_mulc24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc32x16_h.c | FileCheck %t/ae_mulc32x16_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc32x16_l.c | FileCheck %t/ae_mulc32x16_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_00.c | FileCheck %t/ae_mulf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_00_s2.c | FileCheck %t/ae_mulf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_10.c | FileCheck %t/ae_mulf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_11.c | FileCheck %t/ae_mulf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_20.c | FileCheck %t/ae_mulf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_21.c | FileCheck %t/ae_mulf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_22.c | FileCheck %t/ae_mulf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_30.c | FileCheck %t/ae_mulf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_31.c | FileCheck %t/ae_mulf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_32.c | FileCheck %t/ae_mulf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_33.c | FileCheck %t/ae_mulf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16x4ss.c | FileCheck %t/ae_mulf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_hh.c | FileCheck %t/ae_mulf32r_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_lh.c | FileCheck %t/ae_mulf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_ll.c | FileCheck %t/ae_mulf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_ll_s2.c | FileCheck %t/ae_mulf32r_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_hh.c | FileCheck %t/ae_mulf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_lh.c | FileCheck %t/ae_mulf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_ll.c | FileCheck %t/ae_mulf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_ll_s2.c | FileCheck %t/ae_mulf32s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h0.c | FileCheck %t/ae_mulf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h0_s2.c | FileCheck %t/ae_mulf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h1.c | FileCheck %t/ae_mulf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h1_s2.c | FileCheck %t/ae_mulf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h2.c | FileCheck %t/ae_mulf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h2_s2.c | FileCheck %t/ae_mulf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h3.c | FileCheck %t/ae_mulf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o 
- %t/ae_mulf32x16_h3_s2.c | FileCheck %t/ae_mulf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l0.c | FileCheck %t/ae_mulf32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l0_s2.c | FileCheck %t/ae_mulf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l1.c | FileCheck %t/ae_mulf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l1_s2.c | FileCheck %t/ae_mulf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l2.c | FileCheck %t/ae_mulf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l2_s2.c | FileCheck %t/ae_mulf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l3.c | FileCheck %t/ae_mulf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l3_s2.c | FileCheck %t/ae_mulf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16s_l.c | FileCheck %t/ae_mulf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16s_l_s2.c | FileCheck %t/ae_mulf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16u_l.c | FileCheck %t/ae_mulf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16u_l_s2.c | FileCheck %t/ae_mulf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc24ra.c | FileCheck %t/ae_mulfc24ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc32x16ras_h.c | FileCheck %t/ae_mulfc32x16ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc32x16ras_l.c | FileCheck %t/ae_mulfc32x16ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd24x2_fir_h.c | FileCheck %t/ae_mulfd24x2_fir_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd24x2_fir_l.c | FileCheck %t/ae_mulfd24x2_fir_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_hh.c | FileCheck %t/ae_mulfd32x16x2_fir_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_hl.c | FileCheck %t/ae_mulfd32x16x2_fir_hl.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_lh.c | FileCheck %t/ae_mulfd32x16x2_fir_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_ll.c | FileCheck %t/ae_mulfd32x16x2_fir_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp16x4ras.c | FileCheck %t/ae_mulfp16x4ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp16x4s.c | FileCheck %t/ae_mulfp16x4s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2r.c | FileCheck %t/ae_mulfp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2r_s2.c | FileCheck %t/ae_mulfp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2ra.c | FileCheck %t/ae_mulfp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2ra_s2.c | FileCheck %t/ae_mulfp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_h.c | FileCheck %t/ae_mulfp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl 
-S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_h_s2.c | FileCheck %t/ae_mulfp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_l.c | FileCheck %t/ae_mulfp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_l_s2.c | FileCheck %t/ae_mulfp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_h.c | FileCheck %t/ae_mulfp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_h_s2.c | FileCheck %t/ae_mulfp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_l.c | FileCheck %t/ae_mulfp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_l_s2.c | FileCheck %t/ae_mulfp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x2ras.c | FileCheck %t/ae_mulfp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x2rs.c | FileCheck %t/ae_mulfp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfq32sp24s_h_s2.c | FileCheck %t/ae_mulfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfq32sp24s_l_s2.c | FileCheck %t/ae_mulfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp24x2.c | FileCheck %t/ae_mulp24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp24x2_s2.c | FileCheck %t/ae_mulp24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x16x2_h.c | FileCheck %t/ae_mulp32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x16x2_l.c | FileCheck %t/ae_mulp32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x2.c | FileCheck %t/ae_mulp32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulq32sp16s_l_s2.c | FileCheck %t/ae_mulq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulq32sp16u_l_s2.c | FileCheck %t/ae_mulq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulrfq32sp24s_h_s2.c | FileCheck %t/ae_mulrfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulrfq32sp24s_l_s2.c | FileCheck %t/ae_mulrfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls16x4.c | FileCheck %t/ae_muls16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_hh.c | FileCheck %t/ae_muls32_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_lh.c | FileCheck %t/ae_muls32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_ll.c | FileCheck %t/ae_muls32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_hh.c | FileCheck %t/ae_muls32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_hh_s2.c | FileCheck %t/ae_muls32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_lh.c | FileCheck %t/ae_muls32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_lh_s2.c | FileCheck %t/ae_muls32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_ll.c | FileCheck %t/ae_muls32f48p16s_ll.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_ll_s2.c | FileCheck %t/ae_muls32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32u_ll.c | FileCheck %t/ae_muls32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h0.c | FileCheck %t/ae_muls32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h0_s2.c | FileCheck %t/ae_muls32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h1.c | FileCheck %t/ae_muls32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h1_s2.c | FileCheck %t/ae_muls32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h2.c | FileCheck %t/ae_muls32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h2_s2.c | FileCheck %t/ae_muls32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h3.c | FileCheck %t/ae_muls32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h3_s2.c | FileCheck %t/ae_muls32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l0.c | FileCheck %t/ae_muls32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l0_s2.c | FileCheck %t/ae_muls32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l1.c | FileCheck %t/ae_muls32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l1_s2.c | FileCheck %t/ae_muls32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l2.c | FileCheck %t/ae_muls32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l2_s2.c | FileCheck %t/ae_muls32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l3.c | FileCheck %t/ae_muls32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l3_s2.c | FileCheck %t/ae_muls32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad24_hh_ll.c | FileCheck %t/ae_mulsad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad24_hh_ll_s2.c | FileCheck %t/ae_mulsad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h1_l0.c | FileCheck %t/ae_mulsad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h1_l0_s2.c | FileCheck %t/ae_mulsad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h3_l2.c | FileCheck %t/ae_mulsad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h3_l2_s2.c | FileCheck %t/ae_mulsad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd24_hh_ll.c | FileCheck %t/ae_mulsafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd24_hh_ll_s2.c | FileCheck %t/ae_mulsafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h1_l0.c | FileCheck %t/ae_mulsafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulsafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h3_l2.c | 
FileCheck %t/ae_mulsafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulsafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_00.c | FileCheck %t/ae_mulsf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_00_s2.c | FileCheck %t/ae_mulsf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_10.c | FileCheck %t/ae_mulsf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_11.c | FileCheck %t/ae_mulsf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_20.c | FileCheck %t/ae_mulsf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_21.c | FileCheck %t/ae_mulsf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_22.c | FileCheck %t/ae_mulsf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_30.c | FileCheck %t/ae_mulsf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_31.c | FileCheck %t/ae_mulsf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_32.c | FileCheck %t/ae_mulsf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_33.c | FileCheck %t/ae_mulsf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16x4ss.c | FileCheck %t/ae_mulsf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_hh.c | FileCheck %t/ae_mulsf32r_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_lh.c | FileCheck %t/ae_mulsf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_ll.c | FileCheck %t/ae_mulsf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_ll_s2.c | FileCheck %t/ae_mulsf32r_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_hh.c | FileCheck %t/ae_mulsf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_lh.c | FileCheck %t/ae_mulsf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_ll.c | FileCheck %t/ae_mulsf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h0.c | FileCheck %t/ae_mulsf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h0_s2.c | FileCheck %t/ae_mulsf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h1.c | FileCheck %t/ae_mulsf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h1_s2.c | FileCheck %t/ae_mulsf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h2.c | FileCheck %t/ae_mulsf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h2_s2.c | FileCheck %t/ae_mulsf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h3.c | FileCheck %t/ae_mulsf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h3_s2.c | FileCheck %t/ae_mulsf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l0.c | FileCheck %t/ae_mulsf32x16_l0.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l0_s2.c | FileCheck %t/ae_mulsf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l1.c | FileCheck %t/ae_mulsf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l1_s2.c | FileCheck %t/ae_mulsf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l2.c | FileCheck %t/ae_mulsf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l2_s2.c | FileCheck %t/ae_mulsf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l3.c | FileCheck %t/ae_mulsf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l3_s2.c | FileCheck %t/ae_mulsf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16s_l.c | FileCheck %t/ae_mulsf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16s_l_s2.c | FileCheck %t/ae_mulsf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16u_l.c | FileCheck %t/ae_mulsf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16u_l_s2.c | FileCheck %t/ae_mulsf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2r.c | FileCheck %t/ae_mulsfp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2r_s2.c | FileCheck %t/ae_mulsfp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2ra.c | FileCheck %t/ae_mulsfp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2ra_s2.c | FileCheck %t/ae_mulsfp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_h.c | FileCheck %t/ae_mulsfp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_h_s2.c | FileCheck %t/ae_mulsfp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_l.c | FileCheck %t/ae_mulsfp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_l_s2.c | FileCheck %t/ae_mulsfp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_h.c | FileCheck %t/ae_mulsfp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_h_s2.c | FileCheck %t/ae_mulsfp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_l.c | FileCheck %t/ae_mulsfp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_l_s2.c | FileCheck %t/ae_mulsfp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x2ras.c | FileCheck %t/ae_mulsfp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x2rs.c | FileCheck %t/ae_mulsfp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfq32sp24s_h_s2.c | FileCheck %t/ae_mulsfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfq32sp24s_l_s2.c | FileCheck %t/ae_mulsfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp24x2.c | FileCheck %t/ae_mulsp24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_mulsp24x2_s2.c | FileCheck %t/ae_mulsp24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x16x2_h.c | FileCheck %t/ae_mulsp32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x16x2_l.c | FileCheck %t/ae_mulsp32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x2.c | FileCheck %t/ae_mulsp32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsq32sp16s_l_s2.c | FileCheck %t/ae_mulsq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsq32sp16u_l_s2.c | FileCheck %t/ae_mulsq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsrfq32sp24s_h_s2.c | FileCheck %t/ae_mulsrfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsrfq32sp24s_l_s2.c | FileCheck %t/ae_mulsrfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_hh.c | FileCheck %t/ae_mulss32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_hh_s2.c | FileCheck %t/ae_mulss32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_lh.c | FileCheck %t/ae_mulss32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_lh_s2.c | FileCheck %t/ae_mulss32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_ll.c | FileCheck %t/ae_mulss32f48p16s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_ll_s2.c | FileCheck %t/ae_mulss32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hh_ll.c | FileCheck %t/ae_mulssd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hh_ll_s2.c | FileCheck %t/ae_mulssd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hl_lh.c | FileCheck %t/ae_mulssd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hl_lh_s2.c | FileCheck %t/ae_mulssd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h1_l0.c | FileCheck %t/ae_mulssd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h1_l0_s2.c | FileCheck %t/ae_mulssd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h3_l2.c | FileCheck %t/ae_mulssd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h3_l2_s2.c | FileCheck %t/ae_mulssd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_11_00.c | FileCheck %t/ae_mulssfd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_11_00_s2.c | FileCheck %t/ae_mulssfd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_13_02.c | FileCheck %t/ae_mulssfd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_13_02_s2.c | FileCheck %t/ae_mulssfd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_33_22.c | FileCheck %t/ae_mulssfd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_33_22_s2.c | FileCheck %t/ae_mulssfd16ss_33_22_s2.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hh_ll.c | FileCheck %t/ae_mulssfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hh_ll_s2.c | FileCheck %t/ae_mulssfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hl_lh.c | FileCheck %t/ae_mulssfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hl_lh_s2.c | FileCheck %t/ae_mulssfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h1_l0.c | FileCheck %t/ae_mulssfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulssfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h3_l2.c | FileCheck %t/ae_mulssfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulssfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hh_ll.c | FileCheck %t/ae_mulzaad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hh_ll_s2.c | FileCheck %t/ae_mulzaad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hl_lh.c | FileCheck %t/ae_mulzaad24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hl_lh_s2.c | FileCheck %t/ae_mulzaad24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h0_l1.c | FileCheck %t/ae_mulzaad32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h0_l1_s2.c | FileCheck %t/ae_mulzaad32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h1_l0.c | FileCheck %t/ae_mulzaad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h1_l0_s2.c | FileCheck %t/ae_mulzaad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h2_l3.c | FileCheck %t/ae_mulzaad32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h2_l3_s2.c | FileCheck %t/ae_mulzaad32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h3_l2.c | FileCheck %t/ae_mulzaad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h3_l2_s2.c | FileCheck %t/ae_mulzaad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_11_00.c | FileCheck %t/ae_mulzaafd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_11_00_s2.c | FileCheck %t/ae_mulzaafd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_13_02.c | FileCheck %t/ae_mulzaafd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_13_02_s2.c | FileCheck %t/ae_mulzaafd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_33_22.c | FileCheck %t/ae_mulzaafd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_33_22_s2.c | FileCheck %t/ae_mulzaafd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hh_ll.c | FileCheck %t/ae_mulzaafd24_hh_ll.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hh_ll_s2.c | FileCheck %t/ae_mulzaafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hl_lh.c | FileCheck %t/ae_mulzaafd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hl_lh_s2.c | FileCheck %t/ae_mulzaafd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h0_l1.c | FileCheck %t/ae_mulzaafd32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h0_l1_s2.c | FileCheck %t/ae_mulzaafd32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h1_l0.c | FileCheck %t/ae_mulzaafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzaafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h2_l3.c | FileCheck %t/ae_mulzaafd32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h2_l3_s2.c | FileCheck %t/ae_mulzaafd32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h3_l2.c | FileCheck %t/ae_mulzaafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzaafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hh_ll.c | FileCheck %t/ae_mulzasd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hh_ll_s2.c | FileCheck %t/ae_mulzasd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hl_lh.c | FileCheck %t/ae_mulzasd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hl_lh_s2.c | FileCheck %t/ae_mulzasd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h1_l0.c | FileCheck %t/ae_mulzasd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzasd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h3_l2.c | FileCheck %t/ae_mulzasd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzasd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hh_ll.c | FileCheck %t/ae_mulzasfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hh_ll_s2.c | FileCheck %t/ae_mulzasfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hl_lh.c | FileCheck %t/ae_mulzasfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hl_lh_s2.c | FileCheck %t/ae_mulzasfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h1_l0.c | FileCheck %t/ae_mulzasfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzasfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h3_l2.c | FileCheck %t/ae_mulzasfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzasfd32x16_h3_l2_s2.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad24_hh_ll.c | FileCheck %t/ae_mulzsad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad24_hh_ll_s2.c | FileCheck %t/ae_mulzsad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h1_l0.c | FileCheck %t/ae_mulzsad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h1_l0_s2.c | FileCheck %t/ae_mulzsad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h3_l2.c | FileCheck %t/ae_mulzsad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h3_l2_s2.c | FileCheck %t/ae_mulzsad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd24_hh_ll.c | FileCheck %t/ae_mulzsafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd24_hh_ll_s2.c | FileCheck %t/ae_mulzsafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h1_l0.c | FileCheck %t/ae_mulzsafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzsafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h3_l2.c | FileCheck %t/ae_mulzsafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzsafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hh_ll.c | FileCheck %t/ae_mulzssd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hh_ll_s2.c | FileCheck %t/ae_mulzssd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hl_lh.c | FileCheck %t/ae_mulzssd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hl_lh_s2.c | FileCheck %t/ae_mulzssd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h1_l0.c | FileCheck %t/ae_mulzssd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzssd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h3_l2.c | FileCheck %t/ae_mulzssd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzssd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_11_00.c | FileCheck %t/ae_mulzssfd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_11_00_s2.c | FileCheck %t/ae_mulzssfd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_13_02.c | FileCheck %t/ae_mulzssfd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_13_02_s2.c | FileCheck %t/ae_mulzssfd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_33_22.c | FileCheck %t/ae_mulzssfd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_33_22_s2.c | FileCheck %t/ae_mulzssfd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hh_ll.c | FileCheck %t/ae_mulzssfd24_hh_ll.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hh_ll_s2.c | FileCheck %t/ae_mulzssfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hl_lh.c | FileCheck %t/ae_mulzssfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hl_lh_s2.c | FileCheck %t/ae_mulzssfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h1_l0.c | FileCheck %t/ae_mulzssfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzssfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h3_l2.c | FileCheck %t/ae_mulzssfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzssfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nand.c | FileCheck %t/ae_nand.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg16s.c | FileCheck %t/ae_neg16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg24s.c | FileCheck %t/ae_neg24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg32.c | FileCheck %t/ae_neg32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg32s.c | FileCheck %t/ae_neg32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg64.c | FileCheck %t/ae_neg64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg64s.c | FileCheck %t/ae_neg64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsa64.c | FileCheck %t/ae_nsa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsaz16_0.c | FileCheck %t/ae_nsaz16_0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsaz32_l.c | FileCheck %t/ae_nsaz32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_or.c | FileCheck %t/ae_or.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_pksr24.c | FileCheck %t/ae_pksr24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_pksr32.c | FileCheck %t/ae_pksr32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round16x4f32sasym.c | FileCheck %t/ae_round16x4f32sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round16x4f32ssym.c | FileCheck %t/ae_round16x4f32ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round24x2f48sasym.c | FileCheck %t/ae_round24x2f48sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round24x2f48ssym.c | FileCheck %t/ae_round24x2f48ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f48sasym.c | FileCheck %t/ae_round32x2f48sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f48ssym.c | FileCheck %t/ae_round32x2f48ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f64sasym.c | FileCheck %t/ae_round32x2f64sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f64ssym.c | FileCheck %t/ae_round32x2f64ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16f24asym.c | FileCheck %t/ae_roundsp16f24asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16f24sym.c | FileCheck %t/ae_roundsp16f24sym.c +// 
RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16q48x2asym.c | FileCheck %t/ae_roundsp16q48x2asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16q48x2sym.c | FileCheck %t/ae_roundsp16q48x2sym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsq32f48asym.c | FileCheck %t/ae_roundsq32f48asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsq32f48sym.c | FileCheck %t/ae_roundsq32f48sym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_i.c | FileCheck %t/ae_s16_0_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_ip.c | FileCheck %t/ae_s16_0_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_x.c | FileCheck %t/ae_s16_0_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_xc.c | FileCheck %t/ae_s16_0_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_xp.c | FileCheck %t/ae_s16_0_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_i.c | FileCheck %t/ae_s16m_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_iu.c | FileCheck %t/ae_s16m_l_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_x.c | FileCheck %t/ae_s16m_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_xc.c | FileCheck %t/ae_s16m_l_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_xu.c | FileCheck %t/ae_s16m_l_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_i.c | FileCheck %t/ae_s16x2m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_iu.c | FileCheck %t/ae_s16x2m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_x.c | FileCheck %t/ae_s16x2m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_xc.c | FileCheck %t/ae_s16x2m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_xu.c | FileCheck %t/ae_s16x2m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_i.c | FileCheck %t/ae_s16x4_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_ip.c | FileCheck %t/ae_s16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_ric.c | FileCheck %t/ae_s16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_rip.c | FileCheck %t/ae_s16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_x.c | FileCheck %t/ae_s16x4_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_xc.c | FileCheck %t/ae_s16x4_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_xp.c | FileCheck %t/ae_s16x4_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_i.c | FileCheck %t/ae_s24ra64s_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_ip.c | FileCheck %t/ae_s24ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_x.c | FileCheck %t/ae_s24ra64s_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_xc.c | FileCheck %t/ae_s24ra64s_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_xp.c | FileCheck %t/ae_s24ra64s_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_s24x2ra64s_ip.c | FileCheck %t/ae_s24x2ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_i.c | FileCheck %t/ae_s32_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_ip.c | FileCheck %t/ae_s32_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_x.c | FileCheck %t/ae_s32_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_xc.c | FileCheck %t/ae_s32_l_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_xp.c | FileCheck %t/ae_s32_l_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_i.c | FileCheck %t/ae_s32f24_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_ip.c | FileCheck %t/ae_s32f24_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_x.c | FileCheck %t/ae_s32f24_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_xc.c | FileCheck %t/ae_s32f24_l_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_xp.c | FileCheck %t/ae_s32f24_l_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_i.c | FileCheck %t/ae_s32m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_iu.c | FileCheck %t/ae_s32m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_x.c | FileCheck %t/ae_s32m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_xc.c | FileCheck %t/ae_s32m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_xu.c | FileCheck %t/ae_s32m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_i.c | FileCheck %t/ae_s32ra64s_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_ip.c | FileCheck %t/ae_s32ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_x.c | FileCheck %t/ae_s32ra64s_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_xc.c | FileCheck %t/ae_s32ra64s_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_xp.c | FileCheck %t/ae_s32ra64s_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_i.c | FileCheck %t/ae_s32x2_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_ip.c | FileCheck %t/ae_s32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_ric.c | FileCheck %t/ae_s32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_rip.c | FileCheck %t/ae_s32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_x.c | FileCheck %t/ae_s32x2_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_xc.c | FileCheck %t/ae_s32x2_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_xp.c | FileCheck %t/ae_s32x2_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_i.c | FileCheck %t/ae_s32x2f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_ip.c | FileCheck %t/ae_s32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_ric.c | FileCheck %t/ae_s32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_rip.c | FileCheck %t/ae_s32x2f24_rip.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_x.c | FileCheck %t/ae_s32x2f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_xc.c | FileCheck %t/ae_s32x2f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_xp.c | FileCheck %t/ae_s32x2f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2ra64s_ip.c | FileCheck %t/ae_s32x2ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_i.c | FileCheck %t/ae_s64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_ip.c | FileCheck %t/ae_s64_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_x.c | FileCheck %t/ae_s64_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_xc.c | FileCheck %t/ae_s64_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_xp.c | FileCheck %t/ae_s64_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ic.c | FileCheck %t/ae_sa16x4_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ip.c | FileCheck %t/ae_sa16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ric.c | FileCheck %t/ae_sa16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_rip.c | FileCheck %t/ae_sa16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ic.c | FileCheck %t/ae_sa24_l_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ip.c | FileCheck %t/ae_sa24_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ric.c | FileCheck %t/ae_sa24_l_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_rip.c | FileCheck %t/ae_sa24_l_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ic.c | FileCheck %t/ae_sa24x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ip.c | FileCheck %t/ae_sa24x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ric.c | FileCheck %t/ae_sa24x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_rip.c | FileCheck %t/ae_sa24x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ic.c | FileCheck %t/ae_sa32x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ip.c | FileCheck %t/ae_sa32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ric.c | FileCheck %t/ae_sa32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_rip.c | FileCheck %t/ae_sa32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ic.c | FileCheck %t/ae_sa32x2f24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ip.c | FileCheck %t/ae_sa32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ric.c | FileCheck %t/ae_sa32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_rip.c | FileCheck %t/ae_sa32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa64neg_fp.c | FileCheck %t/ae_sa64neg_fp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa64pos_fp.c | FileCheck %t/ae_sa64pos_fp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 
-o - %t/ae_salign64_i.c | FileCheck %t/ae_salign64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat16x4.c | FileCheck %t/ae_sat16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat24s.c | FileCheck %t/ae_sat24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat48s.c | FileCheck %t/ae_sat48s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_satq56s.c | FileCheck %t/ae_satq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb.c | FileCheck %t/ae_sb.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb_ic.c | FileCheck %t/ae_sb_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb_ip.c | FileCheck %t/ae_sb_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf.c | FileCheck %t/ae_sbf.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf_ic.c | FileCheck %t/ae_sbf_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf_ip.c | FileCheck %t/ae_sbf_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi.c | FileCheck %t/ae_sbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi_ic.c | FileCheck %t/ae_sbi_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi_ip.c | FileCheck %t/ae_sbi_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sel16i.c | FileCheck %t/ae_sel16i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sel16i_n.c | FileCheck %t/ae_sel16i_n.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32.c | FileCheck %t/ae_sext32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32x2d16_10.c | FileCheck %t/ae_sext32x2d16_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32x2d16_32.c | FileCheck %t/ae_sext32x2d16_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sha32.c | FileCheck %t/ae_sha32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_shortswap.c | FileCheck %t/ae_shortswap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa16s.c | FileCheck %t/ae_slaa16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa32.c | FileCheck %t/ae_slaa32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa32s.c | FileCheck %t/ae_slaa32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa64.c | FileCheck %t/ae_slaa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa64s.c | FileCheck %t/ae_slaa64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaaq56.c | FileCheck %t/ae_slaaq56.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai16s.c | FileCheck %t/ae_slai16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai24.c | FileCheck %t/ae_slai24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai24s.c | FileCheck %t/ae_slai24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai32.c | FileCheck %t/ae_slai32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai32s.c | FileCheck %t/ae_slai32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai64.c | FileCheck %t/ae_slai64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_slai64s.c | FileCheck %t/ae_slai64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaisq56s.c | FileCheck %t/ae_slaisq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas24.c | FileCheck %t/ae_slas24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas24s.c | FileCheck %t/ae_slas24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas32.c | FileCheck %t/ae_slas32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas32s.c | FileCheck %t/ae_slas32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas64.c | FileCheck %t/ae_slas64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas64s.c | FileCheck %t/ae_slas64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slasq56.c | FileCheck %t/ae_slasq56.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slassq56s.c | FileCheck %t/ae_slassq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sra64_32.c | FileCheck %t/ae_sra64_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa16rs.c | FileCheck %t/ae_sraa16rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa16s.c | FileCheck %t/ae_sraa16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32.c | FileCheck %t/ae_sraa32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32rs.c | FileCheck %t/ae_sraa32rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32s.c | FileCheck %t/ae_sraa32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa64.c | FileCheck %t/ae_sraa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai16.c | FileCheck %t/ae_srai16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai16r.c | FileCheck %t/ae_srai16r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai24.c | FileCheck %t/ae_srai24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai32.c | FileCheck %t/ae_srai32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai32r.c | FileCheck %t/ae_srai32r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai64.c | FileCheck %t/ae_srai64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras24.c | FileCheck %t/ae_sras24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras32.c | FileCheck %t/ae_sras32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras64.c | FileCheck %t/ae_sras64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srla32.c | FileCheck %t/ae_srla32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srla64.c | FileCheck %t/ae_srla64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli24.c | FileCheck %t/ae_srli24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli32.c | FileCheck %t/ae_srli32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli64.c | FileCheck %t/ae_srli64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls24.c | FileCheck %t/ae_srls24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls32.c | FileCheck %t/ae_srls32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls64.c 
| FileCheck %t/ae_srls64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub16.c | FileCheck %t/ae_sub16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub16s.c | FileCheck %t/ae_sub16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub24s.c | FileCheck %t/ae_sub24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub32.c | FileCheck %t/ae_sub32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub32s.c | FileCheck %t/ae_sub32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub64.c | FileCheck %t/ae_sub64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub64s.c | FileCheck %t/ae_sub64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_subadd32.c | FileCheck %t/ae_subadd32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_subadd32s.c | FileCheck %t/ae_subadd32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunca32f64s_l.c | FileCheck %t/ae_trunca32f64s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunca32x2f64s.c | FileCheck %t/ae_trunca32x2f64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunci32f64s_l.c | FileCheck %t/ae_trunci32f64s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunci32x2f64s.c | FileCheck %t/ae_trunci32x2f64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c.c | FileCheck %t/ae_vldl16c.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c_ic.c | FileCheck %t/ae_vldl16c_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c_ip.c | FileCheck %t/ae_vldl16c_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16t.c | FileCheck %t/ae_vldl16t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl32t.c | FileCheck %t/ae_vldl32t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldsht.c | FileCheck %t/ae_vldsht.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vlel16t.c | FileCheck %t/ae_vlel16t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vlel32t.c | FileCheck %t/ae_vlel32t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c.c | FileCheck %t/ae_vles16c.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c_ic.c | FileCheck %t/ae_vles16c_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c_ip.c | FileCheck %t/ae_vles16c_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_xor.c | FileCheck %t/ae_xor.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_zalign64.c | FileCheck %t/ae_zalign64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bithead.c | FileCheck %t/rur_ae_bithead.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bitptr.c | FileCheck %t/rur_ae_bitptr.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bitsused.c | FileCheck %t/rur_ae_bitsused.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cbegin0.c | FileCheck %t/rur_ae_cbegin0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cend0.c | FileCheck %t/rur_ae_cend0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cw_sd_no.c 
| FileCheck %t/rur_ae_cw_sd_no.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cwrap.c | FileCheck %t/rur_ae_cwrap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_first_ts.c | FileCheck %t/rur_ae_first_ts.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_nextoffset.c | FileCheck %t/rur_ae_nextoffset.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_overflow.c | FileCheck %t/rur_ae_overflow.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_ovf_sar.c | FileCheck %t/rur_ae_ovf_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_sar.c | FileCheck %t/rur_ae_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_searchdone.c | FileCheck %t/rur_ae_searchdone.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_tablesize.c | FileCheck %t/rur_ae_tablesize.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_ts_fts_bu_bp.c | FileCheck %t/rur_ae_ts_fts_bu_bp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_bithead.c | FileCheck %t/wur_ae_bithead.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_bitptr.c | FileCheck %t/wur_ae_bitptr.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_bitsused.c | FileCheck %t/wur_ae_bitsused.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cbegin0.c | FileCheck %t/wur_ae_cbegin0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cend0.c | FileCheck %t/wur_ae_cend0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cw_sd_no.c | FileCheck %t/wur_ae_cw_sd_no.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cwrap.c | FileCheck %t/wur_ae_cwrap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_first_ts.c | FileCheck %t/wur_ae_first_ts.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_nextoffset.c | FileCheck %t/wur_ae_nextoffset.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_overflow.c | FileCheck %t/wur_ae_overflow.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_ovf_sar.c | FileCheck %t/wur_ae_ovf_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_sar.c | FileCheck %t/wur_ae_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_searchdone.c | FileCheck %t/wur_ae_searchdone.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_tablesize.c | FileCheck %t/wur_ae_tablesize.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_ts_fts_bu_bp.c | FileCheck %t/wur_ae_ts_fts_bu_bp.c +//--- ae_abs16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_abs16s(ae_int16x4 ae_arth_v1) { 
+// CHECK-LABEL: test_ae_abs16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.abs16s(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_abs16s(ae_arth_v1); +} + +//--- ae_abs24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs24s(ae_arth_v1); +} + +//--- ae_abs32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs32(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs32(ae_arth_v1); +} + +//--- ae_abs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs32s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs32s(ae_arth_v1); +} + +//--- ae_abs64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_abs64(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.abs64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_abs64(ae_arth_v1); +} + +//--- ae_abs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_abs64s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.abs64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_abs64s(ae_arth_v1); +} + +//--- ae_add16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_add16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_add16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.add16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_add16(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_add16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_add16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.add16s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_add16s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short 
ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add24s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add24s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add32_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32_hl_lh(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32_hl_lh(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add64.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_add64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_add64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.add64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_add64(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_add64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_add64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.add64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_add64s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_addbrba32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_addbrba32(int art,int ars) { +// CHECK-LABEL: test_ae_addbrba32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.addbrba32(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_addbrba32(art, ars); +} + +//--- ae_addsub32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_addsub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: 
test_ae_addsub32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.addsub32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_addsub32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_addsub32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_addsub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_addsub32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.addsub32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_addsub32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_and.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_and(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_and +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.and(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_and(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_cvt32x2f16_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_cvt32x2f16_10(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt32x2f16_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.10(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_cvt32x2f16_10(ae_to_dr_v0); +} + +//--- ae_cvt32x2f16_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_cvt32x2f16_32(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt32x2f16_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.32(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_cvt32x2f16_32(ae_to_dr_v0); +} + +//--- ae_cvt48a32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt48a32(int ars) { +// CHECK-LABEL: test_ae_cvt48a32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt48a32(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt48a32(ars); +} + +//--- ae_cvt64a32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt64a32(int ars) { +// CHECK-LABEL: test_ae_cvt64a32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt64a32(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt64a32(ars); +} + +//--- ae_cvt64f32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt64f32_h(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt64f32_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt64f32.h(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt64f32_h(ae_dr_to_dr_v0); +} + +//--- ae_cvta32f24s_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_cvta32f24s_h(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_cvta32f24s_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.cvta32f24s.h(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_cvta32f24s_h(ae_dr_to_ar_v0); +} + +//--- ae_cvta32f24s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_cvta32f24s_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_cvta32f24s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.cvta32f24s.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_cvta32f24s_l(ae_dr_to_ar_v0); +} + +//--- ae_cvtq56a32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56a32s(int ars) { +// CHECK-LABEL: test_ae_cvtq56a32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvtq56a32s(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvtq56a32s(ars); +} + +//--- ae_cvtq56p32s_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56p32s_h(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvtq56p32s_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.h(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvtq56p32s_h(ae_dr_to_dr_v0); +} + +//--- ae_cvtq56p32s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56p32s_l(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvtq56p32s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.l(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvtq56p32s_l(ae_dr_to_dr_v0); +} + +//--- ae_db.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db(const short** ars,int art) { +// CHECK-LABEL: test_ae_db +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.db(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db(ars, art); +} + +//--- ae_db_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db_ic(const short** ars,int art) { +// CHECK-LABEL: test_ae_db_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.db.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db_ic(ars, art); +} + +//--- ae_db_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db_ip(const short** ars,int art) { +// CHECK-LABEL: test_ae_db_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr 
@llvm.xtensa.ae.db.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db_ip(ars, art); +} + +//--- ae_dbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi(ars, 1); +} + +//--- ae_dbi_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi_ic(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi_ic(ars, 1); +} + +//--- ae_dbi_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi_ip(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi_ip(ars, 1); +} + +//--- ae_div64d32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_div64d32_h(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_div64d32_h +// CHECK: %[[LD_AE_ARTH_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.div64d32.h(<1 x i64> %[[LD_AE_ARTH_V]], <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_div64d32_h(ae_arth_v, ae_arth_v1); +} + +//--- ae_div64d32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_div64d32_l(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_div64d32_l +// CHECK: %[[LD_AE_ARTH_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.div64d32.l(<1 x i64> %[[LD_AE_ARTH_V]], <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_div64d32_l(ae_arth_v, ae_arth_v1); +} + +//--- ae_eq16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_eq16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.eq16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_eq16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_eq32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_eq32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.eq32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_eq32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_eq64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) 
_Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_eq64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.eq64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_eq64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_l16_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16_i(const ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16_i(ars, -16); +} + +//--- ae_l16_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_ip(ae_int16x4* ae_ls_v,const ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_ip(ae_ls_v, ars, -16); +} + +//--- ae_l16_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16_x(const ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_l16_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x 
i16> @llvm.xtensa.ae.l16.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16_x(ars, art); +} + +//--- ae_l16_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_xc(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_xc(ae_ls_v, ars, art); +} + +//--- ae_l16_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_xp(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_xp(ae_ls_v, ars, art); +} + +//--- ae_l16m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16m_i(const ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16m_i(ars, -16); +} + +//--- ae_l16m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_iu(ae_int32x2* ae_ls_v,const ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_iu(ae_ls_v, ars, -16); +} + +//--- ae_l16m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16m_x(const ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_l16m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16m_x(ars, art); +} + +//--- ae_l16m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_xc(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_xc(ae_ls_v, ars, art); +} + +//--- ae_l16m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_xu(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_xu(ae_ls_v, ars, art); +} + +//--- ae_l16x2m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16x2m_i(const ae_int16x2* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l16x2m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16x2m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16x2m_i(ars, -32); +} + +//--- ae_l16x2m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_iu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l16x2m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_iu(ae_ls_v, ars, -32); +} + +//--- ae_l16x2m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16x2m_x(const ae_int16x2* ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16x2m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16x2m_x(ars, art); +} + +//--- ae_l16x2m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_xc(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_xc(ae_ls_v, ars, art); +} + +//--- ae_l16x2m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_xu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_xu(ae_ls_v, ars, art); +} + +//--- ae_l16x4_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16x4_i(const ae_int16x4* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l16x4_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16x4.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16x4_i(ars, -64); +} + +//--- ae_l16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_ip(ae_int16x4* ae_ls_v,const ae_int16x4** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l16x4_ip +// CHECK: 
%[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_ip(ae_ls_v, ars, 0); +} + +//--- ae_l16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_ric(ae_int16x4* ae_ls_v,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_l16x4_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_ric(ae_ls_v, ars); +} + +//--- ae_l16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_rip(ae_int16x4* ae_ls_v,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_l16x4_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_rip(ae_ls_v, ars); +} + +//--- ae_l16x4_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16x4_x(const ae_int16x4* ars,int art) { +// CHECK-LABEL: test_ae_l16x4_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16x4.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16x4_x(ars, art); +} + +//--- ae_l16x4_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_xc(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_l16x4_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_xc(ae_ls_v, ars, art); +} + +//--- ae_l16x4_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_xp(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_l16x4_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_xp(ae_ls_v, ars, art); +} + +//--- ae_l32_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32_i(ars, -32); +} + +//--- ae_l32_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_ip(ae_int32x2* 
ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_ip(ae_ls_v, ars, -32); +} + +//--- ae_l32_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32_x(ars, art); +} + +//--- ae_l32_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_xc(ae_ls_v, ars, art); +} + +//--- ae_l32_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_xp(ae_ls_v, ars, art); +} + +//--- ae_l32f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32f24_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32f24_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32f24.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32f24_i(ars, -32); +} + +//--- ae_l32f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32f24_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_ip(ae_ls_v, ars, -32); +} + +//--- ae_l32f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32f24_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32f24_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32f24.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32f24_x(ars, art); +} + +//--- ae_l32f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_l32f24_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_xc(ae_ls_v, ars, art); +} + +//--- ae_l32f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32f24_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_xp(ae_ls_v, ars, art); +} + +//--- ae_l32m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_l32m_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l32m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l32m_i(ars, -32); +} + +//--- ae_l32m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32m_iu(ae_int64* ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_iu(ae_ls_v, ars, -32); +} + +//--- ae_l32m_x.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_l32m_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l32m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l32m_x(ars, art); +} + +//--- ae_l32m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32m_xc(ae_int64* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_xc(ae_ls_v, ars, art); +} + +//--- ae_l32m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32m_xu(ae_int64* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_xu(ae_ls_v, ars, art); +} + +//--- ae_l32x2_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2_i(const ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l32x2_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2_i(ars, -64); +} + +//--- ae_l32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l32x2_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_ip(ae_ls_v, ars, 0); +} + +//--- ae_l32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_ric(ae_ls_v, ars); +} + +//--- ae_l32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret 
void + __builtin_xtensa_ae_l32x2_rip(ae_ls_v, ars); +} + +//--- ae_l32x2_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2_x(const ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_l32x2_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2_x(ars, art); +} + +//--- ae_l32x2_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_xc(ae_ls_v, ars, art); +} + +//--- ae_l32x2_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_xp(ae_ls_v, ars, art); +} + +//--- ae_l32x2f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2f24_i(const ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l32x2f24_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2f24.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2f24_i(ars, -64); +} + +//--- ae_l32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l32x2f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_ip(ae_ls_v, ars, 0); +} + +//--- ae_l32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2f24_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_ric(ae_ls_v, ars); +} + +//--- ae_l32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2f24_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_rip(ae_ls_v, ars); +} + +//--- ae_l32x2f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2f24_x(const ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2f24.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2f24_x(ars, art); +} + +//--- ae_l32x2f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_xc(ae_ls_v, ars, art); +} + +//--- ae_l32x2f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_xp(ae_ls_v, ars, art); +} + +//--- ae_l64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+ae_int64 test_ae_l64_i(const ae_int64* ars,immediate ae_immls64) {
+// CHECK-LABEL: test_ae_l64_i
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l64.i(ptr {{.*}}, i32 {{.*}})
+// CHECK: ret <1 x i64> %[[RET]]
+return __builtin_xtensa_ae_l64_i(ars, -64);
+}
+
+//--- ae_l64_ip.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_l64_ip(ae_int64* ae_ls_v,const ae_int64** ars,immediate ae_immls64) {
+// CHECK-LABEL: test_ae_l64_ip
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.ip(ptr %[[LD_ARS]], i32 {{.*}})
+// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_l64_ip(ae_ls_v, ars, -64);
+}
+
+//--- ae_l64_x.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+ae_int64 test_ae_l64_x(const ae_int64* ars,int art) {
+// CHECK-LABEL: test_ae_l64_x
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l64.x(ptr {{.*}}, i32 {{.*}})
+// CHECK: ret <1 x i64> %[[RET]]
+return __builtin_xtensa_ae_l64_x(ars, art);
+}
+
+//--- ae_l64_xc.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_l64_xc(ae_int64* ae_ls_v,const ae_int64** ars,int art) {
+// CHECK-LABEL: test_ae_l64_xc
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xc(ptr %[[LD_ARS]], i32 {{.*}})
+// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_l64_xc(ae_ls_v, ars, art);
+}
+
+//--- ae_l64_xp.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_l64_xp(ae_int64* ae_ls_v,const ae_int64** ars,int art) {
+// CHECK-LABEL: test_ae_l64_xp
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xp(ptr %[[LD_ARS]], i32 {{.*}})
+// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_l64_xp(ae_ls_v, ars, art);
+}
+
+//--- ae_la16x4_ic.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4_ic(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4_ic
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4_ic(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la16x4_ip.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4_ip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4_ip
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4_ip(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la16x4_ric.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4_ric(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4_ric
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4_ric(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la16x4_rip.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4_rip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4_rip
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4_rip(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la16x4neg_pc.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4neg_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4neg_pc
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4neg.pc(ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4neg_pc(ae_ls_uu, ars);
+}
+
+//--- ae_la16x4pos_pc.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la16x4pos_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) {
+// CHECK-LABEL: test_ae_la16x4pos_pc
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4pos.pc(ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la16x4pos_pc(ae_ls_uu, ars);
+}
+
+//--- ae_la24_ic.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) {
+// CHECK-LABEL: test_ae_la24_ic
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la24_ic(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la24_ip.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) {
+// CHECK-LABEL: test_ae_la24_ip
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la24_ip(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la24_ric.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) {
+// CHECK-LABEL: test_ae_la24_ric
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la24_ric(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la24_rip.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) {
+// CHECK-LABEL: test_ae_la24_rip
+// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}}
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la24_rip(ae_ls_av, ae_ls_uu, ars);
+}
+
+//--- ae_la24neg_pc.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_la24neg_pc(ae_valign* ae_ls_uu,const void** ars) {
+// CHECK-LABEL: test_ae_la24neg_pc
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24neg.pc(ptr %[[LD_ARS]])
+// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0
+// CHECK: ret void
+ __builtin_xtensa_ae_la24neg_pc(ae_ls_uu, ars);
+}
+
+//--- ae_la24pos_pc.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24pos_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24pos_pc(ae_ls_uu, ars); +} + +//--- ae_la24x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2neg_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2neg_pc(ae_ls_uu, ars); +} + +//--- ae_la24x2pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2pos_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2pos_pc(ae_ls_uu, ars); +} + +//--- ae_la32x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2neg_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2neg_pc(ae_ls_uu, ars); +} + +//--- ae_la32x2pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2pos_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2pos_pc(ae_ls_uu, ars); +} + +//--- ae_la64_pp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_la64_pp(const void* ars) { +// CHECK-LABEL: test_ae_la64_pp +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.la64.pp(ptr {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_la64_pp(ars); +} + +//--- ae_lalign64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_lalign64_i(const ae_valign* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_lalign64_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.lalign64.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to 
i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_lalign64_i(ars, -64); +} + +//--- ae_lb.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lb(int art) { +// CHECK-LABEL: test_ae_lb +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lb(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lb(art); +} + +//--- ae_lbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbi(immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbi +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbi(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbi(1); +} + +//--- ae_lbk.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbk(int ars,int art) { +// CHECK-LABEL: test_ae_lbk +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbk(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbk(ars, art); +} + +//--- ae_lbki.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbki(int ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbki +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbki(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return 
__builtin_xtensa_ae_lbki(ars, 1); +} + +//--- ae_lbs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbs(int art) { +// CHECK-LABEL: test_ae_lbs +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbs(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbs(art); +} + +//--- ae_lbsi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbsi(immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbsi +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbsi(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbsi(1); +} + +//--- ae_le16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_le16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.le16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_le16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_le32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_le32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.le32(<2 x i32> {{.*}}, <2 x 
i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_le32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_le64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_le64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.le64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_le64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_lt16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.lt16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_lt16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_lt32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.lt32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_lt32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_lt64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.lt64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_lt64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_max32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_max32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_max32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.max32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_max32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_max64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_max64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_max64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.max64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_max64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_maxabs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_maxabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_maxabs32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.maxabs32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_maxabs32s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_maxabs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_maxabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_maxabs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.maxabs64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_maxabs64s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_min32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_min32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_min32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.min32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_min32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_min64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_min64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_min64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.min64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_min64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_minabs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_minabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_minabs32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.minabs32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return 
__builtin_xtensa_ae_minabs32s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_minabs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_minabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_minabs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.minabs64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_minabs64s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_mov.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mov(ae_int64 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_mov +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mov(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mov(ae_to_dr_v0); +} + +//--- ae_movad16_0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_0(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.0(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_0(ae_dr_to_ar_v0); +} + +//--- ae_movad16_1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_1(ae_int16x4 
ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.1(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_1(ae_dr_to_ar_v0); +} + +//--- ae_movad16_2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_2(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.2(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_2(ae_dr_to_ar_v0); +} + +//--- ae_movad16_3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_3(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.3(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_3(ae_dr_to_ar_v0); +} + +//--- ae_movad32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad32_h(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad32_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad32.h(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad32_h(ae_dr_to_ar_v0); +} + +//--- ae_movad32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad32_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad32_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad32.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad32_l(ae_dr_to_ar_v0); +} + +//--- ae_movalign.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_movalign(ae_valign ae_uu_v) { +// CHECK-LABEL: test_ae_movalign +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.movalign(<8 x i8> {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_movalign(ae_uu_v); +} + +//--- ae_movda16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_movda16(int ars) { +// CHECK-LABEL: test_ae_movda16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movda16(i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_movda16(ars); +} + +//--- ae_movda16x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_movda16x2(int ars,int art) { +// CHECK-LABEL: test_ae_movda16x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movda16x2(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_movda16x2(ars, art); +} + +//--- ae_movda32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32 test_ae_movda32(int ars) { +// CHECK-LABEL: test_ae_movda32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i32> @llvm.xtensa.ae.movda32(i32 {{.*}}) +// CHECK: ret <1 x i32> %[[RET]] +return __builtin_xtensa_ae_movda32(ars); +} + +//--- ae_movda32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_movda32x2(int ars,int art) { +// CHECK-LABEL: test_ae_movda32x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movda32x2(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_movda32x2(ars, art); +} + +//--- ae_movf16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) { +// CHECK-LABEL: test_ae_movf16x4 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <4 x i16>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movf16x4(<4 x i16> %[[LD_AE_CMOV_V]], <4 x i16> {{.*}}, <4 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf16x4(ae_cmov_v, ae_cmov_v0, bt4); +} + +//--- ae_movf32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) { +// CHECK-LABEL: test_ae_movf32x2 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movf32x2(<2 x i32> %[[LD_AE_CMOV_V]], <2 x i32> {{.*}}, <2 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf32x2(ae_cmov_v, ae_cmov_v0, bt2); +} + +//--- ae_movf64.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) { +// CHECK-LABEL: test_ae_movf64 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.movf64(<1 x i64> %[[LD_AE_CMOV_V]], <1 x i64> {{.*}}, <1 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf64(ae_cmov_v, ae_cmov_v0, bt); +} + +//--- ae_movi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_movi(immediate movi_imm) { +// CHECK-LABEL: test_ae_movi +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movi(i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_movi(-16); +} + +//--- ae_movt16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movt16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) { +// CHECK-LABEL: test_ae_movt16x4 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <4 x i16>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movt16x4(<4 x i16> %[[LD_AE_CMOV_V]], <4 x i16> {{.*}}, <4 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt16x4(ae_cmov_v, ae_cmov_v0, bt4); +} + +//--- ae_movt32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movt32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) { +// CHECK-LABEL: test_ae_movt32x2 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movt32x2(<2 x i32> %[[LD_AE_CMOV_V]], <2 x i32> {{.*}}, <2 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt32x2(ae_cmov_v, ae_cmov_v0, bt2); +} + +//--- ae_movt64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movt64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) { +// CHECK-LABEL: test_ae_movt64 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.movt64(<1 x i64> %[[LD_AE_CMOV_V]], <1 x i64> {{.*}}, <1 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt64(ae_cmov_v, ae_cmov_v0, bt); +} + +//--- ae_mul16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mul16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mul16x4 +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mul16x4(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mul16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mul32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mul32_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32u_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32u_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32u.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32u_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mul32x16.h1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 
ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +void test_ae_mula16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mula16x4 +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mula16x4(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mula16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mula32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32u_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32u.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32u_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef 
unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mula32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mula32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 
ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaad24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaad24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulaad24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaad24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h0_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h0_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h0_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1.s2(<1 x i64> 
%[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h0_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h2_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulaad32x16.h2.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h2_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h2_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h2_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_11_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_11_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_11_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_11_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_13_02 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr 
{{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_13_02(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_13_02_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_13_02_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_33_22 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_33_22(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_33_22_s2 +// CHECK: 
%[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_33_22_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, 
<2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaafd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h0_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h0_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h0_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret 
void + __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h2_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x 
i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h2_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h2_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2.s2(<1 x 
i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulac24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac24(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulac24 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac24(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulac24(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulac32x16_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac32x16_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulac32x16_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac32x16.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulac32x16_h(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulac32x16_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac32x16_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulac32x16_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac32x16.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulac32x16_l(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulaf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_10 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.10(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_10(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- 
ae_mulaf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_11 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.11(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_11(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_20.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_20 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.20(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_20(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_21 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.21(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_21(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_22 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.22(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_22(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_30 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.30(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_30(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_31 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.31(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_31(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef 
unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_32 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.32(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_32(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_33 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.33(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_33(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulaf16x4ss +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulaf16x4ss(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulaf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulaf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +void test_ae_mulaf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char 
ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32r_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulaf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_h2
+// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1);
+}
+
+//--- ae_mulaf32x16_h2_s2.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_h2_s2
+// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1);
+}
+
+//--- ae_mulaf32x16_h3.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_h3
+// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1);
+}
+
+//--- ae_mulaf32x16_h3_s2.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_h3_s2
+// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1);
+}
+
+//--- ae_mulaf32x16_l0.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_l0
+// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1);
+}
+
+//--- ae_mulaf32x16_l0_s2.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) {
+// CHECK-LABEL: test_ae_mulaf32x16_l0_s2
+// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}}
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}})
+// CHECK: ret void
+ __builtin_xtensa_ae_mulaf32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1);
+}
+
+//--- ae_mulaf32x16_l1.c
+
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool;
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2;
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4;
+
+typedef int ae_int32 __attribute__(( vector_size(4)));
+typedef int ae_int32x2 __attribute__(( vector_size(8)));
+typedef short ae_int16 __attribute__(( vector_size(2)));
+typedef short ae_int16x2 __attribute__(( vector_size(4)));
+typedef short ae_int16x4 __attribute__(( vector_size(8)));
+typedef long long ae_int64 __attribute__(( vector_size(8)));
+typedef unsigned char ae_valign __attribute__(( vector_size(8)));
+
+typedef int immediate;
+
+void test_ae_mulaf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 
ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { 
+// CHECK-LABEL: test_ae_mulaf48q32sp16s_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16u_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 
ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafc24ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc24ra(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc24ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc24ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc24ra(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafc32x16ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc32x16ras_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc32x16ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc32x16ras_h(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafc32x16ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc32x16ras_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 
opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc32x16ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc32x16ras_l(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafd24x2_fir_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd24x2_fir_h +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.h(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd24x2_fir_h(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd24x2_fir_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd24x2_fir_l +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.l(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd24x2_fir_l(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_hh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hh(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_hh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_hl.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_hl +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hl(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_hl(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> 
%[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_lh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_ll(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp24x2r +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2r(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2r(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulafp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp24x2r_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2r.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2r_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp24x2ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2ra(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp24x2ra_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2ra_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +void test_ae_mulafp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; 
+ +void test_ae_mulafp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x2ras +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x2ras(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x2ras(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x2rs +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x2rs(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x2rs(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulap24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulap24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulap24x2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap24x2(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap24x2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulap24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulap24x2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap24x2.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap24x2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulap32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulap32x16x2_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap32x16x2.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x16x2_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulap32x16x2_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// 
CHECK-LABEL: test_ae_mulap32x16x2_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap32x16x2.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x16x2_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulap32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulap32x2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap32x2(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaq32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaq32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaq32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaq32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaq32sp16u_l_s2 
+// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaq32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaq32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mularfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mularfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mularfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mularfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mularfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mularfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mularfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mularfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_hh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_hh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x 
i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulasd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> 
{{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + 
+//--- ae_mulasfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasfd32x16_h1_l0_s2.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulc24.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc24(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulc24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc24(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulc24(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulc32x16_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc32x16_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulc32x16_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc32x16.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulc32x16_h(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulc32x16_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc32x16_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulc32x16_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc32x16.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulc32x16_l(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf16ss_00 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf16ss_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_10(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.10(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_10(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_11(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_11 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.11(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_11(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_20.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_20(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_20 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.20(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_20(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_21(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_21 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.21(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_21(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_22(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_22(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_30(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_30 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.30(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_30(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_31(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_31 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.31(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_31(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_32(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.32(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_32(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_33(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_33 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.33(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_33(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulf16x4ss +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulf16x4ss(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32r_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_lh(opnd_ae_sem_mul_x2_S1_d0, 
opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32s_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulf32x16.h2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 
test_ae_mulf32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16s_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef 
long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16u_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16u_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16u_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16u_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfc24ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc24ra(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc24ra +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc24ra(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc24ra(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfc32x16ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc32x16ras_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc32x16ras_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: 
ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc32x16ras_h(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfc32x16ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc32x16ras_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc32x16ras_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc32x16ras_l(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfd24x2_fir_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd24x2_fir_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.h(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd24x2_fir_h(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd24x2_fir_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd24x2_fir_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.l(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd24x2_fir_l(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- 
ae_mulfd32x16x2_fir_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_hh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_hl.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_hl +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hl(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_hl(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_lh(ae_mul_q0, 
ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_ll(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfp16x4ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_mulfp16x4ras(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulfp16x4ras +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.mulfp16x4ras(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_mulfp16x4ras(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulfp16x4s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_mulfp16x4s(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulfp16x4s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.mulfp16x4s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_mulfp16x4s(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulfp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2r(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp24x2r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2r(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2r(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2r_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp24x2r_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2r.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2r_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2ra(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp24x2ra +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2ra(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2ra_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: 
test_ae_mulfp24x2ra_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2ra_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( 
vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x2ras(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x2ras +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x2ras(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x2ras(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x2rs(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x2rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulfp32x2rs(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x2rs(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfq32sp24s_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.h.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulfq32sp24s_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfq32sp24s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulfq32sp24s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulp24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp24x2(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulp24x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp24x2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp24x2(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulp24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef 
short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp24x2_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulp24x2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp24x2.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp24x2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulp32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x16x2_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulp32x16x2_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x16x2_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulp32x16x2_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x16x2_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulp32x16x2_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x16x2_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulp32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// 
CHECK-LABEL: test_ae_mulp32x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulq32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulq32sp16s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulq32sp16s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulq32sp16s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulq32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulq32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulq32sp16u_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulq32sp16u.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulq32sp16u_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulrfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulrfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulrfq32sp24s_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.h.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulrfq32sp24s_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulrfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool 
xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulrfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulrfq32sp24s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulrfq32sp24s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_muls16x4 +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.muls16x4(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_muls16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_muls32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef 
short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_hh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_hh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_hh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32u_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32u.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32u_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.muls32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h1.s2(<1 x i64> 
%[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x 
i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_muls32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_muls32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l3_s2(ae_mul_S2_q0, 
ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h1_l0(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h1_l0 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h1_l0(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h3_l2(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h3_l2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h3_l2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsafd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_10 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.10(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_10(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_11 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.11(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_11(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_20.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_20 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.20(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_20(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_21 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.21(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_21(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { 
+// CHECK-LABEL: test_ae_mulsf16ss_22 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.22(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_22(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_30 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.30(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_30(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_31 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.31(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_31(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_32 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.32(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf16ss_32(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_33 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.33(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_33(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulsf16x4ss +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulsf16x4ss(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulsf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulsf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32r_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32r_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32r_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32r_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret 
void + __builtin_xtensa_ae_mulsf32r_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> 
{{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x 
i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16s_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf48q32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16u_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2r +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret 
void + __builtin_xtensa_ae_mulsfp24x2r(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2r_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2r_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2ra(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2ra_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2ra_s2(ae_mul_S2_q0, 
ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsfp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> 
{{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x2ras +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x2ras(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x 
i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x2ras(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x2rs +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x2rs(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x2rs(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) 
+// CHECK: ret void + __builtin_xtensa_ae_mulsfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsp24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsp24x2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp24x2(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp24x2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsp24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsp24x2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp24x2.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp24x2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsp32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x16x2_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x16x2_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsp32x16x2_l.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x16x2_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x16x2_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsp32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp32x2(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsq32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsq32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsq32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsq32sp16u_l_s2.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsq32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsq32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsq32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsrfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsrfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsrfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsrfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsrfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsrfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsrfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsrfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_hh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_hh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_11_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_11_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_11_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_11_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_13_02 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_13_02(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_13_02_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_13_02_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int 
ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_33_22 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_33_22(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_33_22_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_33_22_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: 
test_ae_mulzaad24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaad24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h0_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h0_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h0_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool 
xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h2_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h2_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h2_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_11_00 +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_11_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_11_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_11_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_13_02 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_13_02(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_13_02_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_13_02_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_33_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_33_22(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_33_22_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_33_22_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaafd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h0_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> 
%[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h0_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h0_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h2_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h2_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h2_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- 
ae_mulzasd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hh_ll_s2 +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h1_l0(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h1_l0(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h3_l2(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h3_l2(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsafd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzsafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 
opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzsafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_11_00 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulzssfd16ss.11.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_11_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_11_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_11_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_13_02 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_13_02(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_13_02_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_13_02_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_33_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_33_22(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_33_22_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_33_22_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mulzssfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_nand.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef 
int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_nand(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_nand +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.nand(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_nand(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_neg16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_neg16s(ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.neg16s(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_neg16s(ae_arth_v1); +} + +//--- ae_neg24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg24s(ae_arth_v1); +} + +//--- ae_neg32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg32(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg32(ae_arth_v1); +} + +//--- ae_neg32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg32s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg32s(ae_arth_v1); +} + +//--- ae_neg64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_neg64(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.neg64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_neg64(ae_arth_v1); +} + +//--- ae_neg64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_neg64s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.neg64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_neg64s(ae_arth_v1); +} + +//--- ae_nsa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsa64(ae_int64 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsa64(<1 x i64> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsa64(ae_dr_to_ar_v0); +} + +//--- 
ae_nsaz16_0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsaz16_0(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsaz16_0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsaz16.0(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsaz16_0(ae_dr_to_ar_v0); +} + +//--- ae_nsaz32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsaz32_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsaz32_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsaz32.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsaz32_l(ae_dr_to_ar_v0); +} + +//--- ae_or.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_or(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_or +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.or(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_or(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_pksr24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_pksr24(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) { +// CHECK-LABEL: test_ae_pksr24 +// CHECK: 
%[[LD_AE_PKS_D:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.pksr24(<2 x i32> %[[LD_AE_PKS_D]], <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_pksr24(ae_pks_d, ae_pks_s, 0); +} + +//--- ae_pksr32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_pksr32(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) { +// CHECK-LABEL: test_ae_pksr32 +// CHECK: %[[LD_AE_PKS_D:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.pksr32(<2 x i32> %[[LD_AE_PKS_D]], <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_pksr32(ae_pks_d, ae_pks_s, 0); +} + +//--- ae_round16x4f32sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_round16x4f32sasym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_round16x4f32sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.round16x4f32sasym(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_round16x4f32sasym(ae_arth_v1, ae_arth_v0); +} + +//--- ae_round16x4f32ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_round16x4f32ssym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_round16x4f32ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.round16x4f32ssym(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_round16x4f32ssym(ae_arth_v1, ae_arth_v0); +} + +//--- ae_round24x2f48sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round24x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round24x2f48sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round24x2f48sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round24x2f48sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round24x2f48ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round24x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round24x2f48ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round24x2f48ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round24x2f48ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f48sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f48sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f48sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f48sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f48ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f48ssym(ae_int64 
ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f48ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f48ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f48ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f64sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f64sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f64sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f64sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f64sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f64ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f64ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f64ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f64ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f64ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsp16f24asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16f24asym(ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_roundsp16f24asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16f24asym(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16f24asym(ae_arth_v0); +} + +//--- ae_roundsp16f24sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16f24sym(ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_roundsp16f24sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16f24sym(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16f24sym(ae_arth_v0); +} + +//--- ae_roundsp16q48x2asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16q48x2asym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsp16q48x2asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2asym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16q48x2asym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsp16q48x2sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16q48x2sym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsp16q48x2sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2sym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16q48x2sym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsq32f48asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_roundsq32f48asym(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsq32f48asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call 
<1 x i64> @llvm.xtensa.ae.roundsq32f48asym(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_roundsq32f48asym(ae_arth_v1); +} + +//--- ae_roundsq32f48sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_roundsq32f48sym(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsq32f48sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.roundsq32f48sym(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_roundsq32f48sym(ae_arth_v1); +} + +//--- ae_s16_0_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_i(ae_int16x4 ae_ls_v,ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16_0_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16.0.i(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_i(ae_ls_v, ars, -16); +} + +//--- ae_s16_0_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_ip(ae_int16x4 ae_ls_v,ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16_0_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.ip(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_ip(ae_ls_v, ars, -16); +} + +//--- ae_s16_0_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( 
vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_x(ae_int16x4 ae_ls_v,ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_s16_0_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16.0.x(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_x(ae_ls_v, ars, art); +} + +//--- ae_s16_0_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_xc(ae_int16x4 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16_0_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.xc(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_xc(ae_ls_v, ars, art); +} + +//--- ae_s16_0_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_xp(ae_int16x4 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16_0_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.xp(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_xp(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_i(ae_int32x2 ae_ls_v,ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16m_l_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16m.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_i(ae_ls_v, ars, -16); +} + +//--- ae_s16m_l_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_iu(ae_int32x2 ae_ls_v,ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16m_l_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.iu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_iu(ae_ls_v, ars, -16); +} + +//--- ae_s16m_l_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_x(ae_int32x2 ae_ls_v,ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16m.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_x(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_xc(ae_int32x2 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_xu(ae_int32x2 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_xu 
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.xu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_xu(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_i(ae_int32x2 ae_ls_v,ae_int16x2* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s16x2m_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x2m.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_i(ae_ls_v, ars, -32); +} + +//--- ae_s16x2m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_iu(ae_int32x2 ae_ls_v,ae_int16x2** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s16x2m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.iu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_iu(ae_ls_v, ars, -32); +} + +//--- ae_s16x2m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_x(ae_int32x2 ae_ls_v,ae_int16x2* ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x2m.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_x(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_xc(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_xc(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_xu(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.xu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_xu(ae_ls_v, ars, art); +} + +//--- ae_s16x4_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_i(ae_int16x4 ae_ls_v,ae_int16x4* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s16x4_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x4.i(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_i(ae_ls_v, ars, -64); +} + +//--- ae_s16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_ip(ae_int16x4 ae_ls_v,ae_int16x4** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_s16x4_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.ip(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_ip(ae_ls_v, ars, 
0); +} + +//--- ae_s16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_ric(ae_int16x4 ae_ls_v,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_s16x4_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.ric(<4 x i16> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_ric(ae_ls_v, ars); +} + +//--- ae_s16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_rip(ae_int16x4 ae_ls_v,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_s16x4_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.rip(<4 x i16> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_rip(ae_ls_v, ars); +} + +//--- ae_s16x4_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_x(ae_int16x4 ae_ls_v,ae_int16x4* ars,int art) { +// CHECK-LABEL: test_ae_s16x4_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x4.x(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_x(ae_ls_v, ars, art); +} + +//--- ae_s16x4_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_s16x4_xc(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_s16x4_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.xc(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_xc(ae_ls_v, ars, art); +} + +//--- ae_s16x4_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_xp(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_s16x4_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.xp(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_xp(ae_ls_v, ars, art); +} + +//--- ae_s24ra64s_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s24ra64s_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s24ra64s.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_i(ae_ls_v1, ars, -32); +} + +//--- ae_s24ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s24ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24ra64s.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_ip(ae_ls_v1, ars, -32); +} + +//--- ae_s24ra64s_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s24ra64s.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_x(ae_ls_v1, ars, art); +} + +//--- ae_s24ra64s_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24ra64s.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_xc(ae_ls_v1, ars, art); +} + +//--- ae_s24ra64s_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24ra64s.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_xp(ae_ls_v1, ars, art); +} + +//--- ae_s24x2ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s24x2ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load 
ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24x2ra64s.ip(<1 x i64> {{.*}}, <1 x i64> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s24x2ra64s_ip(ae_ls_v2, ae_ls_v1, ars); +} + +//--- ae_s32_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32_l_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_i(ae_ls_v, ars, -32); +} + +//--- ae_s32_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32_l_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_ip(ae_ls_v, ars, -32); +} + +//--- ae_s32_l_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_x(ae_ls_v, ars, art); +} + +//--- ae_s32_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef 
short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s32_l_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32_l_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_xp(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32f24_l_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32f24.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_i(ae_ls_v, ars, -32); +} + +//--- ae_s32f24_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32f24_l_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_ip(ae_ls_v, ars, -32); +} + +//--- ae_s32f24_l_x.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32f24.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_x(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_xp(ae_ls_v, ars, art); +} + +//--- ae_s32m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_i(ae_int64 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32m_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32m.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_i(ae_ls_v, ars, -32); +} + +//--- ae_s32m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_iu(ae_int64 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.iu(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_iu(ae_ls_v, ars, -32); +} + +//--- ae_s32m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_x(ae_int64 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32m_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32m.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_x(ae_ls_v, ars, art); +} + +//--- ae_s32m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_xc(ae_int64 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_xc(ae_ls_v, ars, art); +} + +//--- ae_s32m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_xu(ae_int64 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.xu(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_xu(ae_ls_v, ars, art); +} + +//--- ae_s32ra64s_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32ra64s_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32ra64s.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_i(ae_ls_v1, ars, -32); +} + +//--- ae_s32ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_ip(ae_ls_v1, ars, -32); +} + +//--- ae_s32ra64s_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32ra64s.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_s32ra64s_x(ae_ls_v1, ars, art); +} + +//--- ae_s32ra64s_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_xc(ae_ls_v1, ars, art); +} + +//--- ae_s32ra64s_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_xp(ae_ls_v1, ars, art); +} + +//--- ae_s32x2_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s32x2_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_i(ae_ls_v, ars, -64); +} + +//--- ae_s32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_s32x2_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_ip(ae_ls_v, ars, 0); +} + +//--- ae_s32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.ric(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_ric(ae_ls_v, ars); +} + +//--- ae_s32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.rip(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_rip(ae_ls_v, ars); +} + +//--- ae_s32x2_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_s32x2_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_x(ae_ls_v, ars, art); +} + +//--- ae_s32x2_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_xc(ae_ls_v, ars, art); +} + +//--- ae_s32x2_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_xp(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s32x2f24_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2f24.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_i(ae_ls_v, ars, -64); +} + +//--- ae_s32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) { +// 
CHECK-LABEL: test_ae_s32x2f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_ip(ae_ls_v, ars, 0); +} + +//--- ae_s32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2f24_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.ric(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_ric(ae_ls_v, ars); +} + +//--- ae_s32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2f24_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.rip(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_rip(ae_ls_v, ars); +} + +//--- ae_s32x2f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2f24.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_x(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_xc(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_xp(ae_ls_v, ars, art); +} + +//--- ae_s32x2ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2ra64s.ip(<1 x i64> {{.*}}, <1 x i64> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2ra64s_ip(ae_ls_v2, ae_ls_v1, ars); +} + +//--- ae_s64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_i(ae_int64 ae_ls_v,ae_int64* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s64_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s64.i(<1 
x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_i(ae_ls_v, ars, -64); +} + +//--- ae_s64_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_ip(ae_int64 ae_ls_v,ae_int64** ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s64_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_ip(ae_ls_v, ars, -64); +} + +//--- ae_s64_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_x(ae_int64 ae_ls_v,ae_int64* ars,int art) { +// CHECK-LABEL: test_ae_s64_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s64.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_x(ae_ls_v, ars, art); +} + +//--- ae_s64_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_xc(ae_int64 ae_ls_v,ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_s64_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_xc(ae_ls_v, ars, art); +} + +//--- ae_s64_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_xp(ae_int64 ae_ls_v,ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_s64_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_xp(ae_ls_v, ars, art); +} + +//--- ae_sa16x4_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ic(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ic(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ip(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ric(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// 
CHECK-LABEL: test_ae_sa16x4_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ric(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_rip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.rip(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: 
test_ae_sa24_l_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_ic +// CHECK: 
%[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x 
i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} 
+// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr 
{{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa64neg_fp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa64neg_fp(ae_valign* ae_ls_su,void* ars) { +// CHECK-LABEL: test_ae_sa64neg_fp +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr 
{{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.sa64neg.fp(<8 x i8> %[[LD_AE_LS_SU]], ptr {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sa64neg_fp(ae_ls_su, ars); +} + +//--- ae_sa64pos_fp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa64pos_fp(ae_valign* ae_ls_su,void* ars) { +// CHECK-LABEL: test_ae_sa64pos_fp +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.sa64pos.fp(<8 x i8> %[[LD_AE_LS_SU]], ptr {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sa64pos_fp(ae_ls_su, ars); +} + +//--- ae_salign64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_salign64_i(ae_valign ae_ls_su,ae_valign* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_salign64_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.salign64.i(<8 x i8> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_salign64_i(ae_ls_su, ars, -64); +} + +//--- ae_sat16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sat16x4(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat16x4 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sat16x4(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sat16x4(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sat24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sat24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sat24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sat24s(ae_arth_v1); +} + +//--- ae_sat48s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sat48s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat48s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sat48s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sat48s(ae_arth_v1); +} + +//--- ae_satq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_satq56s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_satq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.satq56s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_satq56s(ae_arth_v1); +} + +//--- ae_sb.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb(short** ars,int art) { +// CHECK-LABEL: test_ae_sb +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb(ars, art); +} + +//--- ae_sb_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb_ic(short** ars,int art) { +// CHECK-LABEL: test_ae_sb_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb_ic(ars, art); +} + +//--- ae_sb_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb_ip(short** ars,int art) { +// CHECK-LABEL: test_ae_sb_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb_ip(ars, art); +} + +//--- ae_sbf.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf(short** ars) { +// CHECK-LABEL: test_ae_sbf +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf(ars); +} + +//--- ae_sbf_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf_ic(short** ars) { +// CHECK-LABEL: test_ae_sbf_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf_ic(ars); +} + +//--- ae_sbf_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf_ip(short** ars) { +// CHECK-LABEL: test_ae_sbf_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf_ip(ars); +} + +//--- ae_sbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbi(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi(ars, art, 1); +} + +//--- ae_sbi_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi_ic(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbi.ic(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi_ic(ars, art, 1); +} + +//--- ae_sbi_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi_ip(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr 
@llvm.xtensa.ae.sbi.ip(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi_ip(ars, art, 1); +} + +//--- ae_sel16i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sel16i(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm) { +// CHECK-LABEL: test_ae_sel16i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sel16i(<4 x i16> {{.*}}, <4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sel16i(ae_dr_to_dr_v0, ae_dr_to_dr_v1, 0); +} + +//--- ae_sel16i_n.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sel16i_n(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm_N) { +// CHECK-LABEL: test_ae_sel16i_n +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sel16i.n(<4 x i16> {{.*}}, <4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sel16i_n(ae_dr_to_dr_v0, ae_dr_to_dr_v1, 0); +} + +//--- ae_sext32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32(ae_int32x2 ae_dr_to_dr_v0,immediate ae_opnd_tp7) { +// CHECK-LABEL: test_ae_sext32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32(ae_dr_to_dr_v0, 7); +} + +//--- ae_sext32x2d16_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32x2d16_10(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_sext32x2d16_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32x2d16.10(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32x2d16_10(ae_to_dr_v0); +} + +//--- ae_sext32x2d16_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32x2d16_32(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_sext32x2d16_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32x2d16.32(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32x2d16_32(ae_to_dr_v0); +} + +//--- ae_sha32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_sha32(int ars) { +// CHECK-LABEL: test_ae_sha32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.sha32(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_sha32(ars); +} + +//--- ae_shortswap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_shortswap(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_shortswap +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.shortswap(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_shortswap(ae_to_dr_v0); +} + +//--- ae_slaa16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_slaa16s(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.slaa16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_slaa16s(ae_shift_d0, ars); +} + +//--- ae_slaa32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slaa32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slaa32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slaa32(ae_shift_d0, ars); +} + +//--- ae_slaa32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slaa32s(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slaa32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slaa32s(ae_shift_d0, ars); +} + +//--- ae_slaa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaa64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaa64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaa64(ae_shift_d0, ars); +} + +//--- ae_slaa64s.c + 
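+// Note: as in the split files above, this test only checks that the builtin
+// lowers to the matching llvm.xtensa.ae.* intrinsic. AE_SLAA64S takes its
+// shift amount from the plain integer argument `ars` (not a compile-time
+// immediate), so the value is forwarded unchanged to the intrinsic call.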
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaa64s(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaa64s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaa64s(ae_shift_d0, ars); +} + +//--- ae_slaaq56.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaaq56(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaaq56 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaaq56(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaaq56(ae_shift_d0, ars); +} + +//--- ae_slai16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_slai16s(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_slai16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.slai16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_slai16s(ae_shift_d0, 0); +} + +//--- ae_slai24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// 
CHECK-LABEL: test_ae_slai24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai24(ae_shift_d0, 0); +} + +//--- ae_slai24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai24s(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai24s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai24s(ae_shift_d0, 0); +} + +//--- ae_slai32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai32(ae_shift_d0, 0); +} + +//--- ae_slai32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai32s(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai32s(ae_shift_d0, 0); +} + +//--- ae_slai64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slai64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slai64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slai64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slai64(ae_shift_d0, 0); +} + +//--- ae_slai64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slai64s(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slai64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slai64s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slai64s(ae_shift_d0, 0); +} + +//--- ae_slaisq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaisq56s(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slaisq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaisq56s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaisq56s(ae_shift_d0, 0); +} + +//--- ae_slas24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas24(ae_shift_d0); +} + +//--- ae_slas24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas24s(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas24s(ae_shift_d0); +} + +//--- ae_slas32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas32(ae_shift_d0); +} + +//--- ae_slas32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas32s(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas32s(ae_shift_d0); +} + +//--- ae_slas64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slas64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slas64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slas64(ae_shift_d0); +} + +//--- ae_slas64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slas64s(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slas64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slas64s(ae_shift_d0); +} + +//--- ae_slasq56.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slasq56(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slasq56 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slasq56(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slasq56(ae_shift_d0); +} + +//--- ae_slassq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slassq56s(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slassq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slassq56s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slassq56s(ae_shift_d0); +} + +//--- ae_sra64_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sra64_32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sra64_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sra64.32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> 
%[[RET]] +return __builtin_xtensa_ae_sra64_32(ae_shift_d0, ars); +} + +//--- ae_sraa16rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sraa16rs(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa16rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sraa16rs(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sraa16rs(ae_shift_d0, ars); +} + +//--- ae_sraa16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sraa16s(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sraa16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sraa16s(ae_shift_d0, ars); +} + +//--- ae_sraa32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sraa32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32(ae_shift_d0, ars); +} + +//--- ae_sraa32rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +ae_int32x2 test_ae_sraa32rs(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32rs(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32rs(ae_shift_d0, ars); +} + +//--- ae_sraa32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sraa32s(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32s(ae_shift_d0, ars); +} + +//--- ae_sraa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sraa64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sraa64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sraa64(ae_shift_d0, ars); +} + +//--- ae_srai16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_srai16(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_srai16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.srai16(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_srai16(ae_shift_d0, 0); +} + +//--- ae_srai16r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_srai16r(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_srai16r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.srai16r(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_srai16r(ae_shift_d0, 0); +} + +//--- ae_srai24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai24(ae_shift_d0, 0); +} + +//--- ae_srai32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai32(ae_shift_d0, 0); +} + +//--- ae_srai32r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai32r(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai32r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai32r(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai32r(ae_shift_d0, 0); +} + +//--- ae_srai64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; 
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srai64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_srai64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srai64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srai64(ae_shift_d0, 0); +} + +//--- ae_sras24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sras24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sras24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sras24(ae_shift_d0); +} + +//--- ae_sras32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sras32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sras32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sras32(ae_shift_d0); +} + +//--- ae_sras64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sras64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sras64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sras64(ae_shift_d0); +} + 
+//--- ae_srla32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srla32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_srla32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srla32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srla32(ae_shift_d0, ars); +} + +//--- ae_srla64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srla64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_srla64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srla64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srla64(ae_shift_d0, ars); +} + +//--- ae_srli24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srli24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srli24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srli24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srli24(ae_shift_d0, 0); +} + +//--- ae_srli32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srli32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { 
+// CHECK-LABEL: test_ae_srli32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srli32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srli32(ae_shift_d0, 0); +} + +//--- ae_srli64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srli64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_srli64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srli64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srli64(ae_shift_d0, 0); +} + +//--- ae_srls24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srls24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srls24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srls24(ae_shift_d0); +} + +//--- ae_srls32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srls32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srls32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srls32(ae_shift_d0); +} + +//--- ae_srls64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srls64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srls64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srls64(ae_shift_d0); +} + +//--- ae_sub16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sub16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sub16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sub16(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sub16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sub16s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sub16s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub24s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub24s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sub64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sub64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sub64(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sub64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sub64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sub64s(ae_arth_v0, 
ae_arth_v1); +} + +//--- ae_subadd32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_subadd32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_subadd32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.subadd32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_subadd32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_subadd32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_subadd32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_subadd32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.subadd32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_subadd32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_trunca32f64s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunca32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,int ars) { +// CHECK-LABEL: test_ae_trunca32f64s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunca32f64s.l(<2 x i32> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunca32f64s_l(ae_shift_d0, ae_shift_sd, ars); +} + +//--- ae_trunca32x2f64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunca32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,int ars) { +// CHECK-LABEL: test_ae_trunca32x2f64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunca32x2f64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunca32x2f64s(ae_shift_d0, ae_shift_sd, ars); +} + +//--- ae_trunci32f64s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunci32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) { +// CHECK-LABEL: test_ae_trunci32f64s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunci32f64s.l(<2 x i32> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunci32f64s_l(ae_shift_d0, ae_shift_sd, 0); +} + +//--- ae_trunci32x2f64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunci32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) { +// CHECK-LABEL: test_ae_trunci32x2f64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunci32x2f64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunci32x2f64s(ae_shift_d0, ae_shift_sd, 0); +} + +//--- ae_vldl16c.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c(ars); +} + +//--- ae_vldl16c_ic.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c_ic(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c_ic(ars); +} + +//--- ae_vldl16c_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c_ip(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c_ip(ars); +} + +//--- ae_vldl16t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16t(xtbool* br,int* art,const short* ars) { +// CHECK-LABEL: test_ae_vldl16t +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl16t(ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vldl16t(br, art, ars); +} + +//--- ae_vldl32t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl32t(xtbool* br,int* art,const int* ars) { +// CHECK-LABEL: 
test_ae_vldl32t +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl32t(ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vldl32t(br, art, ars); +} + +//--- ae_vldsht.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldsht(int art) { +// CHECK-LABEL: test_ae_vldsht +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.vldsht(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_vldsht(art); +} + +//--- ae_vlel16t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vlel16t(xtbool* br,int* art,const short* ars) { +// CHECK-LABEL: test_ae_vlel16t +// CHECK: %[[LD_ART:.*]] = load i32, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel16t(i32 %[[LD_ART]], ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vlel16t(br, art, ars); +} + +//--- ae_vlel32t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vlel32t(xtbool* br,int* art,const int* ars) { +// CHECK-LABEL: test_ae_vlel32t +// CHECK: %[[LD_ART:.*]] = load i32, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel32t(i32 %[[LD_ART]], ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vlel32t(br, art, ars); +} + +//--- ae_vles16c.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c(short** ars) { +// CHECK-LABEL: test_ae_vles16c +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vles16c(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c(ars); +} + +//--- ae_vles16c_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c_ic(short** ars) { +// CHECK-LABEL: test_ae_vles16c_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vles16c.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c_ic(ars); +} + +//--- ae_vles16c_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c_ip(short** ars) { +// CHECK-LABEL: test_ae_vles16c_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vles16c.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c_ip(ars); +} + +//--- ae_xor.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_xor(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_xor +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.xor(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_xor(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_zalign64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_zalign64() { +// CHECK-LABEL: test_ae_zalign64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.zalign64() +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_zalign64(); +} + +//--- rur_ae_bithead.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bithead() { +// CHECK-LABEL: test_rur_ae_bithead +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bithead() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bithead(); +} + +//--- rur_ae_bitptr.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bitptr() { +// CHECK-LABEL: test_rur_ae_bitptr +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bitptr() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bitptr(); +} + +//--- rur_ae_bitsused.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bitsused() { +// CHECK-LABEL: test_rur_ae_bitsused +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bitsused() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bitsused(); +} + +//--- rur_ae_cbegin0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cbegin0() { +// CHECK-LABEL: test_rur_ae_cbegin0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cbegin0() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cbegin0(); +} + +//--- rur_ae_cend0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cend0() { +// CHECK-LABEL: test_rur_ae_cend0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cend0() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cend0(); +} + +//--- rur_ae_cw_sd_no.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cw_sd_no() { +// CHECK-LABEL: test_rur_ae_cw_sd_no +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cw.sd.no() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cw_sd_no(); +} + +//--- rur_ae_cwrap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cwrap() { +// CHECK-LABEL: test_rur_ae_cwrap +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cwrap() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cwrap(); +} + +//--- rur_ae_first_ts.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_first_ts() { +// CHECK-LABEL: test_rur_ae_first_ts +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.first.ts() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_first_ts(); +} + +//--- rur_ae_nextoffset.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_nextoffset() { +// CHECK-LABEL: test_rur_ae_nextoffset +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.nextoffset() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_nextoffset(); +} + +//--- rur_ae_overflow.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_overflow() { +// CHECK-LABEL: test_rur_ae_overflow +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.overflow() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_overflow(); +} + +//--- rur_ae_ovf_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_ovf_sar() { +// CHECK-LABEL: test_rur_ae_ovf_sar +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.ovf.sar() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_ovf_sar(); +} + +//--- rur_ae_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_sar() { +// CHECK-LABEL: test_rur_ae_sar +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.sar() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_sar(); +} + +//--- rur_ae_searchdone.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_searchdone() { +// CHECK-LABEL: test_rur_ae_searchdone +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.searchdone() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_searchdone(); +} + +//--- rur_ae_tablesize.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_tablesize() { +// CHECK-LABEL: test_rur_ae_tablesize +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.tablesize() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_tablesize(); +} + +//--- rur_ae_ts_fts_bu_bp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_ts_fts_bu_bp() { +// CHECK-LABEL: test_rur_ae_ts_fts_bu_bp +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.ts.fts.bu.bp() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_ts_fts_bu_bp(); +} + +//--- wur_ae_bithead.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
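The RUR/WUR tests on either side of this point exercise the AudioEngine user-register builtins as plain int reads and writes. As a minimal sketch, assuming the prototypes implied by these tests (int __builtin_xtensa_rur_ae_overflow(void) and void __builtin_xtensa_wur_ae_overflow(int)), the two sides can be paired to save and restore the sticky overflow flag around a region of work; the helper name and callback are illustrative only, not part of the patch:

// Hypothetical helper: save AE_OVERFLOW, run some work, then restore it.
// Prototypes follow the codegen tests in this patch.
static inline void with_saved_overflow(void (*body)(void)) {
  int saved = __builtin_xtensa_rur_ae_overflow(); // read the AE_OVERFLOW user register
  body();                                         // work that may set the sticky flag
  __builtin_xtensa_wur_ae_overflow(saved);        // write the saved value back
}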
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bithead(int art) { +// CHECK-LABEL: test_wur_ae_bithead +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bithead(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bithead(art); +} + +//--- wur_ae_bitptr.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bitptr(int art) { +// CHECK-LABEL: test_wur_ae_bitptr +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bitptr(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bitptr(art); +} + +//--- wur_ae_bitsused.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bitsused(int art) { +// CHECK-LABEL: test_wur_ae_bitsused +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bitsused(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bitsused(art); +} + +//--- wur_ae_cbegin0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cbegin0(int art) { +// CHECK-LABEL: test_wur_ae_cbegin0 +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cbegin0(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cbegin0(art); +} + +//--- wur_ae_cend0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cend0(int art) { +// CHECK-LABEL: test_wur_ae_cend0 +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cend0(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cend0(art); +} + +//--- wur_ae_cw_sd_no.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cw_sd_no(int art) { +// CHECK-LABEL: test_wur_ae_cw_sd_no +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cw.sd.no(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cw_sd_no(art); +} + +//--- wur_ae_cwrap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cwrap(int art) { +// CHECK-LABEL: test_wur_ae_cwrap +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cwrap(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cwrap(art); +} + +//--- wur_ae_first_ts.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_first_ts(int art) { +// CHECK-LABEL: test_wur_ae_first_ts +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.first.ts(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_first_ts(art); +} + +//--- wur_ae_nextoffset.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_nextoffset(int art) { +// CHECK-LABEL: test_wur_ae_nextoffset +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.nextoffset(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_nextoffset(art); +} + +//--- wur_ae_overflow.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_overflow(int art) { +// CHECK-LABEL: test_wur_ae_overflow +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.overflow(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_overflow(art); +} + +//--- wur_ae_ovf_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_ovf_sar(int art) { +// CHECK-LABEL: test_wur_ae_ovf_sar +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.ovf.sar(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_ovf_sar(art); +} + +//--- wur_ae_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_sar(int art) { +// CHECK-LABEL: test_wur_ae_sar +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.sar(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_sar(art); +} + +//--- wur_ae_searchdone.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
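Several of the HiFi value-manipulation intrinsics added later in this patch (ae_movda32x2, ae_add32, ae_movad32_h/_l) carry ClangBuiltin bindings, so they compose directly on the 64-bit vector typedefs these tests use. A hedged sketch of a lane-wise 32-bit add, assuming the C prototypes mirror the TableGen signatures (v2i32 maps to a vector_size(8) int typedef, i32 to int); the helper name and typedef are illustrative only:

// Mirrors the tests' ae_int32x2 layout; named differently to stay self-contained.
typedef int ae_int32x2_t __attribute__((vector_size(8)));

// Add two pairs of 32-bit lanes. Prototypes are inferred from the intrinsic
// signatures in IntrinsicsXtensaHIFI.td and may differ from the xtensa headers.
static inline void add2x32(int a0, int a1, int b0, int b1, int *r_hi, int *r_lo) {
  ae_int32x2_t va = __builtin_xtensa_ae_movda32x2(a0, a1); // pack scalars into an AE_DR value
  ae_int32x2_t vb = __builtin_xtensa_ae_movda32x2(b0, b1);
  ae_int32x2_t vr = __builtin_xtensa_ae_add32(va, vb);     // lane-wise 32-bit add
  *r_hi = __builtin_xtensa_ae_movad32_h(vr);               // extract the high lane
  *r_lo = __builtin_xtensa_ae_movad32_l(vr);               // extract the low lane
}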
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_searchdone(int art) { +// CHECK-LABEL: test_wur_ae_searchdone +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.searchdone(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_searchdone(art); +} + +//--- wur_ae_tablesize.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_tablesize(int art) { +// CHECK-LABEL: test_wur_ae_tablesize +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.tablesize(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_tablesize(art); +} + +//--- wur_ae_ts_fts_bu_bp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_ts_fts_bu_bp(int art) { +// CHECK-LABEL: test_wur_ae_ts_fts_bu_bp +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.ts.fts.bu.bp(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_ts_fts_bu_bp(art); +} + diff --git a/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c new file mode 100644 index 0000000000000..62689e9b591da --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c @@ -0,0 +1,48 @@ +// RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + +xtbool test_xtbool_movt(float a, float b, xtbool c) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.movt.s(float %{{.*}}, float {{.*}}, <1 x i1> {{.*}}) + return __builtin_xtensa_xt_movt_s(a, b, c); +} + +xtbool test_xtbool_movf(float a, float b, xtbool c) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.movf.s(float %{{.*}}, float {{.*}}, <1 x i1> {{.*}}) + return __builtin_xtensa_xt_movf_s(a, b, c); +} + +xtbool test_xtbool_oeq_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.oeq.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_oeq_s(a, b); +} + +xtbool test_xtbool_ueq_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ueq.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ueq_s(a, b); +} + +xtbool test_xtbool_olt_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.olt.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_olt_s(a, b); +} + +xtbool test_xtbool_ult_s(float a, float b) 
{ + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ult.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ult_s(a, b); +} + +xtbool test_xtbool_ole_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ole.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ole_s(a, b); +} + +xtbool test_xtbool_ule_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ule.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ule_s(a, b); +} + +xtbool test_xtbool_un_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.un.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_un_s(a, b); +} \ No newline at end of file diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index 1a7c1252a6ee6..c80de35a105e4 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -500,6 +500,7 @@ def llvm_aarch64_svcount_ty : LLVMType; def llvm_x86amx_ty : LLVMType; +def llvm_v1i1_ty : LLVMType; // 1 x i1 def llvm_v2i1_ty : LLVMType; // 2 x i1 def llvm_v4i1_ty : LLVMType; // 4 x i1 def llvm_v8i1_ty : LLVMType; // 8 x i1 diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index 046df476967f3..3ac44dba439fb 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -249,7 +249,188 @@ def int_xtensa_xsr_m3: ClangBuiltin<"__builtin_xtensa_xsr_m3">, Intrinsic<[], [llvm_ptr_ty], []>; +//===----------------------------------------------------------------------===// +// Float operations + +def int_xtensa_xt_movt_s: ClangBuiltin<"__builtin_xtensa_xt_movt_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_xt_movf_s: ClangBuiltin<"__builtin_xtensa_xt_movf_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_xt_oeq_s: ClangBuiltin<"__builtin_xtensa_xt_oeq_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ole_s: ClangBuiltin<"__builtin_xtensa_xt_ole_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_olt_s: ClangBuiltin<"__builtin_xtensa_xt_olt_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ueq_s: ClangBuiltin<"__builtin_xtensa_xt_ueq_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ule_s: ClangBuiltin<"__builtin_xtensa_xt_ule_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ult_s: ClangBuiltin<"__builtin_xtensa_xt_ult_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_un_s: ClangBuiltin<"__builtin_xtensa_xt_un_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_add_s: ClangBuiltin<"__builtin_xtensa_xt_add_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_abs_s: ClangBuiltin<"__builtin_xtensa_xt_abs_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mul_s: ClangBuiltin<"__builtin_xtensa_xt_mul_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_sub_s: ClangBuiltin<"__builtin_xtensa_xt_sub_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_trunc_s: 
ClangBuiltin<"__builtin_xtensa_xt_trunc_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_utrunc_s: ClangBuiltin<"__builtin_xtensa_xt_utrunc_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_float_s: ClangBuiltin<"__builtin_xtensa_xt_float_s">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_ufloat_s: ClangBuiltin<"__builtin_xtensa_xt_ufloat_s">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_addexp_s: ClangBuiltin<"__builtin_xtensa_xt_addexp_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_addexpm_s: ClangBuiltin<"__builtin_xtensa_xt_addexpm_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ceil_s: ClangBuiltin<"__builtin_xtensa_xt_ceil_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_div0_s: ClangBuiltin<"__builtin_xtensa_xt_div0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_divn_s: ClangBuiltin<"__builtin_xtensa_xt_divn_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_floor_s: ClangBuiltin<"__builtin_xtensa_xt_floor_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_lsi: ClangBuiltin<"__builtin_xtensa_xt_lsi">, + Intrinsic<[llvm_float_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_lsip: + Intrinsic<[llvm_float_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_lsx: ClangBuiltin<"__builtin_xtensa_xt_lsx">, + Intrinsic<[llvm_float_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_lsxp: + Intrinsic<[llvm_float_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_madd_s: ClangBuiltin<"__builtin_xtensa_xt_madd_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_maddn_s: ClangBuiltin<"__builtin_xtensa_xt_maddn_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mkdadj_s: ClangBuiltin<"__builtin_xtensa_xt_mkdadj_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mksadj_s: ClangBuiltin<"__builtin_xtensa_xt_mksadj_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mov_s: ClangBuiltin<"__builtin_xtensa_xt_mov_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_moveqz_s: ClangBuiltin<"__builtin_xtensa_xt_moveqz_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movgez_s: ClangBuiltin<"__builtin_xtensa_xt_movgez_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movltz_s: ClangBuiltin<"__builtin_xtensa_xt_movltz_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movnez_s: ClangBuiltin<"__builtin_xtensa_xt_movnez_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_msub_s: ClangBuiltin<"__builtin_xtensa_xt_msub_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + 
+def int_xtensa_xt_neg_s: ClangBuiltin<"__builtin_xtensa_xt_neg_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_nexp01_s: ClangBuiltin<"__builtin_xtensa_xt_nexp01_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_recip0_s: ClangBuiltin<"__builtin_xtensa_xt_recip0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_rfr: ClangBuiltin<"__builtin_xtensa_xt_rfr">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_round_s: ClangBuiltin<"__builtin_xtensa_xt_round_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_rsqrt0_s: ClangBuiltin<"__builtin_xtensa_xt_rsqrt0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_rur_fcr: ClangBuiltin<"__builtin_xtensa_xt_rur_fcr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_xt_rur_fsr: ClangBuiltin<"__builtin_xtensa_xt_rur_fsr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_xt_sqrt0_s: ClangBuiltin<"__builtin_xtensa_xt_sqrt0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ssi: ClangBuiltin<"__builtin_xtensa_xt_ssi">, + Intrinsic<[], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_ssip: ClangBuiltin<"__builtin_xtensa_xt_ssip">, + Intrinsic<[llvm_ptr_ty], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_ssx: ClangBuiltin<"__builtin_xtensa_xt_ssx">, + Intrinsic<[], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_ssxp: ClangBuiltin<"__builtin_xtensa_xt_ssxp">, + Intrinsic<[llvm_ptr_ty], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_wfr: ClangBuiltin<"__builtin_xtensa_xt_wfr">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_wur_fcr: ClangBuiltin<"__builtin_xtensa_xt_wur_fcr">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_xt_wur_fsr: ClangBuiltin<"__builtin_xtensa_xt_wur_fsr">, + Intrinsic<[], [llvm_i32_ty], []>; + // Generated code // --------------- include "llvm/IR/IntrinsicsXtensaESP32S3.td" -} \ No newline at end of file + +//===----------------------------------------------------------------------===// +// HiFi3 Intrinsics +//===----------------------------------------------------------------------===// + +//Extended Access +def int_xtensa_xt_l32ex: ClangBuiltin<"__builtin_xtensa_xt_l32ex">, + Intrinsic<[llvm_i32_ty], [LLVMQualPointerType<0>],[]>; + +def int_xtensa_xt_s32ex: + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, LLVMQualPointerType<0>],[]>; + +def int_xtensa_xt_getex: + Intrinsic<[llvm_i32_ty], [llvm_i32_ty],[]>; + +def int_xtensa_xt_clrex: ClangBuiltin<"__builtin_xtensa_xt_clrex">, + Intrinsic<[], [],[]>; + +include "llvm/IR/IntrinsicsXtensaHIFI.td" +} diff --git a/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td b/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td new file mode 100644 index 0000000000000..662e942829cc8 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td @@ -0,0 +1,2613 @@ +//===- IntrinsicsXtensaHIFI.td - Defines Xtensa HIFI intrinsics -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the intrinsics for Xtensa HIFI extension. +// +//===----------------------------------------------------------------------===// + +def int_xtensa_ae_abs16s: ClangBuiltin<"__builtin_xtensa_ae_abs16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs24s: ClangBuiltin<"__builtin_xtensa_ae_abs24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs32: ClangBuiltin<"__builtin_xtensa_ae_abs32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs32s: ClangBuiltin<"__builtin_xtensa_ae_abs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs64: ClangBuiltin<"__builtin_xtensa_ae_abs64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs64s: ClangBuiltin<"__builtin_xtensa_ae_abs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_add16: ClangBuiltin<"__builtin_xtensa_ae_add16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_add16s: ClangBuiltin<"__builtin_xtensa_ae_add16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_add24s: ClangBuiltin<"__builtin_xtensa_ae_add24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32: ClangBuiltin<"__builtin_xtensa_ae_add32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_add32_hl_lh">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32s: ClangBuiltin<"__builtin_xtensa_ae_add32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add64: ClangBuiltin<"__builtin_xtensa_ae_add64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_add64s: ClangBuiltin<"__builtin_xtensa_ae_add64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_addbrba32: ClangBuiltin<"__builtin_xtensa_ae_addbrba32">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_addsub32: ClangBuiltin<"__builtin_xtensa_ae_addsub32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_addsub32s: ClangBuiltin<"__builtin_xtensa_ae_addsub32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_and: ClangBuiltin<"__builtin_xtensa_ae_and">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt32x2f16_10: ClangBuiltin<"__builtin_xtensa_ae_cvt32x2f16_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt32x2f16_32: ClangBuiltin<"__builtin_xtensa_ae_cvt32x2f16_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt48a32: ClangBuiltin<"__builtin_xtensa_ae_cvt48a32">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt64a32: ClangBuiltin<"__builtin_xtensa_ae_cvt64a32">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt64f32_h: ClangBuiltin<"__builtin_xtensa_ae_cvt64f32_h">, + 
Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvta32f24s_h: ClangBuiltin<"__builtin_xtensa_ae_cvta32f24s_h">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvta32f24s_l: ClangBuiltin<"__builtin_xtensa_ae_cvta32f24s_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56a32s: ClangBuiltin<"__builtin_xtensa_ae_cvtq56a32s">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56p32s_h: ClangBuiltin<"__builtin_xtensa_ae_cvtq56p32s_h">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56p32s_l: ClangBuiltin<"__builtin_xtensa_ae_cvtq56p32s_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_dbi: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_dbi_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_dbi_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_div64d32_h: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_div64d32_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_l16_i: ClangBuiltin<"__builtin_xtensa_ae_l16_i">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16_ip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16_x: ClangBuiltin<"__builtin_xtensa_ae_l16_x">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16_xc: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16_xp: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_i: ClangBuiltin<"__builtin_xtensa_ae_l16m_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16m_iu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16m_x: ClangBuiltin<"__builtin_xtensa_ae_l16m_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_xu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_i: ClangBuiltin<"__builtin_xtensa_ae_l16x2m_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x2m_iu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x2m_x: ClangBuiltin<"__builtin_xtensa_ae_l16x2m_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_xc: + Intrinsic<[llvm_v2i32_ty, 
llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_xu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_i: ClangBuiltin<"__builtin_xtensa_ae_l16x4_i">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x4_ip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x4_ric: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l16x4_rip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l16x4_x: ClangBuiltin<"__builtin_xtensa_ae_l16x4_x">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_xc: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_xp: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_i: ClangBuiltin<"__builtin_xtensa_ae_l32_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32_x: ClangBuiltin<"__builtin_xtensa_ae_l32_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_i: ClangBuiltin<"__builtin_xtensa_ae_l32f24_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32f24_x: ClangBuiltin<"__builtin_xtensa_ae_l32f24_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_i: ClangBuiltin<"__builtin_xtensa_ae_l32m_i">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32m_iu: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32m_x: ClangBuiltin<"__builtin_xtensa_ae_l32m_x">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_xc: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_xu: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_i: ClangBuiltin<"__builtin_xtensa_ae_l32x2_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2_x: ClangBuiltin<"__builtin_xtensa_ae_l32x2_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def 
int_xtensa_ae_l32x2f24_i: ClangBuiltin<"__builtin_xtensa_ae_l32x2f24_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2f24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2f24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2f24_x: ClangBuiltin<"__builtin_xtensa_ae_l32x2f24_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2f24_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2f24_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_i: ClangBuiltin<"__builtin_xtensa_ae_l64_i">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l64_ip: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l64_x: ClangBuiltin<"__builtin_xtensa_ae_l64_x">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_xc: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_xp: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_la16x4_ic: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4_ip: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4_ric: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4_rip: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, 
llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la64_pp: ClangBuiltin<"__builtin_xtensa_ae_la64_pp">, + Intrinsic<[llvm_v8i8_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_lalign64_i: ClangBuiltin<"__builtin_xtensa_ae_lalign64_i">, + Intrinsic<[llvm_v8i8_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_lb: ClangBuiltin<"__builtin_xtensa_ae_lb">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbi: ClangBuiltin<"__builtin_xtensa_ae_lbi">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_lbk: ClangBuiltin<"__builtin_xtensa_ae_lbk">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbki: ClangBuiltin<"__builtin_xtensa_ae_lbki">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_lbs: ClangBuiltin<"__builtin_xtensa_ae_lbs">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbsi: ClangBuiltin<"__builtin_xtensa_ae_lbsi">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_le16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_le32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_le64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_max32: ClangBuiltin<"__builtin_xtensa_ae_max32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_max64: ClangBuiltin<"__builtin_xtensa_ae_max64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_maxabs32s: ClangBuiltin<"__builtin_xtensa_ae_maxabs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_maxabs64s: ClangBuiltin<"__builtin_xtensa_ae_maxabs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_min32: ClangBuiltin<"__builtin_xtensa_ae_min32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_min64: 
ClangBuiltin<"__builtin_xtensa_ae_min64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_minabs32s: ClangBuiltin<"__builtin_xtensa_ae_minabs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_minabs64s: ClangBuiltin<"__builtin_xtensa_ae_minabs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_mov: ClangBuiltin<"__builtin_xtensa_ae_mov">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_0: ClangBuiltin<"__builtin_xtensa_ae_movad16_0">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_1: ClangBuiltin<"__builtin_xtensa_ae_movad16_1">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_2: ClangBuiltin<"__builtin_xtensa_ae_movad16_2">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_3: ClangBuiltin<"__builtin_xtensa_ae_movad16_3">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad32_h: ClangBuiltin<"__builtin_xtensa_ae_movad32_h">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad32_l: ClangBuiltin<"__builtin_xtensa_ae_movad32_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movalign: ClangBuiltin<"__builtin_xtensa_ae_movalign">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda16: ClangBuiltin<"__builtin_xtensa_ae_movda16">, + Intrinsic<[llvm_v4i16_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda16x2: ClangBuiltin<"__builtin_xtensa_ae_movda16x2">, + Intrinsic<[llvm_v4i16_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda32: ClangBuiltin<"__builtin_xtensa_ae_movda32">, + Intrinsic<[llvm_v1i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda32x2: ClangBuiltin<"__builtin_xtensa_ae_movda32x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf16x4: + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_v4i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf64: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movi: ClangBuiltin<"__builtin_xtensa_ae_movi">, + Intrinsic<[llvm_v2i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_movt16x4: + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_v4i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movt32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movt64: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul16x4: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_hh: ClangBuiltin<"__builtin_xtensa_ae_mul32_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_lh: ClangBuiltin<"__builtin_xtensa_ae_mul32_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_ll: ClangBuiltin<"__builtin_xtensa_ae_mul32_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_ll_s2: 
ClangBuiltin<"__builtin_xtensa_ae_mul32_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32u_ll: ClangBuiltin<"__builtin_xtensa_ae_mul32u_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h0: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h0_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h1: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h1_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h2_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h3: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h3_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l0: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l1: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l3: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula16x4: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32u_ll: + 
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h0_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h0_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h2_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h2_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulaad32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_11_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_11_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_13_02: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_13_02_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_33_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_33_22_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h0_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h0_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h2_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h2_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac24: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac32x16_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac32x16_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_10: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_11: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_20: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_21: + Intrinsic<[llvm_v2i32_ty], 
[llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_30: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_31: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_33: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16x4ss: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l2_s2: + 
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16s_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16u_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc24ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc32x16ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc32x16ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd24x2_fir_h: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd24x2_fir_l: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_hh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_hl: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_lh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_ll: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2r: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2r_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2ra_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_h_s2: + Intrinsic<[llvm_v2i32_ty], 
[llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x2ras: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x2rs: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap24x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap24x2_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x16x2_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x16x2_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaq32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaq32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mularfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mularfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_hh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + 
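(Editorial aside, not part of the patch: the multiply-accumulate defs in this hunk, e.g. int_xtensa_ae_mulasd32x16_h1_l0 just above, pass the 64-bit accumulator in as the first operand and return the updated value, which is what lets them be declared IntrNoMem. Assuming the default TableGen name mangling, a def of that shape should correspond to an IR call of roughly the following form; the value names here are hypothetical.)

  %acc1 = call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0(<1 x i64> %acc0, <2 x i32> %d, <4 x i16> %c)

(The _s2 twins declare the identical signature; they presumably select the _S2 encoding of the same operation.)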
+def int_xtensa_ae_mulasd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc24: ClangBuiltin<"__builtin_xtensa_ae_mulc24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc32x16_h: ClangBuiltin<"__builtin_xtensa_ae_mulc32x16_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc32x16_l: ClangBuiltin<"__builtin_xtensa_ae_mulc32x16_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_00: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_10: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_11: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_11">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_20: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_20">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_21: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_21">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_22: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_30: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_30">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_31: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_31">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_32: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_33: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_33">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16x4ss: + 
Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_hh: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_lh: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_ll: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_hh: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_lh: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_ll: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h0: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h1: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h3: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l0: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l1: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l2_s2">, 
+ Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l3: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16s_l: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16s_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16u_l: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16u_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16u_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16u_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc24ra: ClangBuiltin<"__builtin_xtensa_ae_mulfc24ra">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc32x16ras_h: ClangBuiltin<"__builtin_xtensa_ae_mulfc32x16ras_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc32x16ras_l: ClangBuiltin<"__builtin_xtensa_ae_mulfc32x16ras_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd24x2_fir_h: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd24x2_fir_l: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_hh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_hl: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_lh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_ll: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp16x4ras: ClangBuiltin<"__builtin_xtensa_ae_mulfp16x4ras">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp16x4s: ClangBuiltin<"__builtin_xtensa_ae_mulfp16x4s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2r: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2r">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2r_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2r_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2ra: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2ra">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2ra_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2ra_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_h: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_h_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_l: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_l_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_h: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_h_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_l: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_l_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x2ras: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x2ras">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x2rs: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x2rs">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfq32sp24s_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfq32sp24s_h_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfq32sp24s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfq32sp24s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp24x2: ClangBuiltin<"__builtin_xtensa_ae_mulp24x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp24x2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulp24x2_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x16x2_h: ClangBuiltin<"__builtin_xtensa_ae_mulp32x16x2_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x16x2_l: ClangBuiltin<"__builtin_xtensa_ae_mulp32x16x2_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x2: ClangBuiltin<"__builtin_xtensa_ae_mulp32x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulq32sp16s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulq32sp16s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulq32sp16u_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulq32sp16u_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulrfq32sp24s_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulrfq32sp24s_h_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulrfq32sp24s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulrfq32sp24s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls16x4: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_hh: + 
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_hh: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_hh_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_hh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_lh: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_ll: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32u_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad24_hh_ll: + 
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_10: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_11: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_20: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_21: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_30: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_31: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_33: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16x4ss: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], 
[IntrNoMem]>; + +def int_xtensa_ae_mulsf32s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16s_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16u_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2r: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2r_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2ra_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, 
llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x2ras: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x2rs: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp24x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp24x2_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x16x2_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x16x2_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsq32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsq32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsrfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsrfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_hh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulssd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_11_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_11_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_13_02: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_13_02_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_33_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_33_22_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h0_l1: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h0_l1">, + Intrinsic<[llvm_v1i64_ty], 
[llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h0_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h2_l3: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h2_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h2_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_11_00: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_11_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_11_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_11_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_13_02: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_13_02">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_13_02_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_13_02_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_33_22: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_33_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_33_22_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_33_22_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h0_l1: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h0_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h0_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulzaafd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h2_l3: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h2_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h2_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h3_l2_s2: 
ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzsad24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], 
[llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_11_00: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_11_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_11_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_11_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_13_02: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_13_02">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_13_02_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_13_02_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_33_22: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_33_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_33_22_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_33_22_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_nand: ClangBuiltin<"__builtin_xtensa_ae_nand">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg16s: ClangBuiltin<"__builtin_xtensa_ae_neg16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg24s: ClangBuiltin<"__builtin_xtensa_ae_neg24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg32: ClangBuiltin<"__builtin_xtensa_ae_neg32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg32s: ClangBuiltin<"__builtin_xtensa_ae_neg32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg64: ClangBuiltin<"__builtin_xtensa_ae_neg64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg64s: ClangBuiltin<"__builtin_xtensa_ae_neg64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_nsa64: ClangBuiltin<"__builtin_xtensa_ae_nsa64">, + Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty], [IntrNoMem]>; 
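For reference, a minimal IR-level sketch of how one of the entries above is exercised, assuming only the int_xtensa_ae_neg32s definition from this hunk; the function name neg32s_pair is illustrative and not part of the patch: ; Clang expands __builtin_xtensa_ae_neg32s to this intrinsic call, per the ClangBuiltin binding above. declare <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32>) define <2 x i32> @neg32s_pair(<2 x i32> %v) { %r = call <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32> %v) ret <2 x i32> %r }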
+ +def int_xtensa_ae_nsaz16_0: ClangBuiltin<"__builtin_xtensa_ae_nsaz16_0">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_nsaz32_l: ClangBuiltin<"__builtin_xtensa_ae_nsaz32_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_or: ClangBuiltin<"__builtin_xtensa_ae_or">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_pksr24: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_pksr32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_round16x4f32sasym: ClangBuiltin<"__builtin_xtensa_ae_round16x4f32sasym">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_round16x4f32ssym: ClangBuiltin<"__builtin_xtensa_ae_round16x4f32ssym">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_round24x2f48sasym: ClangBuiltin<"__builtin_xtensa_ae_round24x2f48sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round24x2f48ssym: ClangBuiltin<"__builtin_xtensa_ae_round24x2f48ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f48sasym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f48sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f48ssym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f48ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f64sasym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f64sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f64ssym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f64ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16f24asym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16f24asym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16f24sym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16f24sym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16q48x2asym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16q48x2asym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16q48x2sym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16q48x2sym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsq32f48asym: ClangBuiltin<"__builtin_xtensa_ae_roundsq32f48asym">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsq32f48sym: ClangBuiltin<"__builtin_xtensa_ae_roundsq32f48sym">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_s16_0_i: ClangBuiltin<"__builtin_xtensa_ae_s16_0_i">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16_0_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16_0_x: ClangBuiltin<"__builtin_xtensa_ae_s16_0_x">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16_0_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16_0_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, 
llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_i: ClangBuiltin<"__builtin_xtensa_ae_s16m_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16m_l_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16m_l_x: ClangBuiltin<"__builtin_xtensa_ae_s16m_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_i: ClangBuiltin<"__builtin_xtensa_ae_s16x2m_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x2m_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x2m_x: ClangBuiltin<"__builtin_xtensa_ae_s16x2m_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_i: ClangBuiltin<"__builtin_xtensa_ae_s16x4_i">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x4_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x4_ric: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s16x4_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s16x4_x: ClangBuiltin<"__builtin_xtensa_ae_s16x4_x">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_i: ClangBuiltin<"__builtin_xtensa_ae_s24ra64s_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s24ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s24ra64s_x: ClangBuiltin<"__builtin_xtensa_ae_s24ra64s_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24x2ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32_l_i: ClangBuiltin<"__builtin_xtensa_ae_s32_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32_l_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32_l_x: ClangBuiltin<"__builtin_xtensa_ae_s32_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32_l_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_i: ClangBuiltin<"__builtin_xtensa_ae_s32f24_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], 
[ImmArg>]>; + +def int_xtensa_ae_s32f24_l_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32f24_l_x: ClangBuiltin<"__builtin_xtensa_ae_s32f24_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_i: ClangBuiltin<"__builtin_xtensa_ae_s32m_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32m_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32m_x: ClangBuiltin<"__builtin_xtensa_ae_s32m_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_i: ClangBuiltin<"__builtin_xtensa_ae_s32ra64s_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32ra64s_x: ClangBuiltin<"__builtin_xtensa_ae_s32ra64s_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_i: ClangBuiltin<"__builtin_xtensa_ae_s32x2_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2_ric: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2_x: ClangBuiltin<"__builtin_xtensa_ae_s32x2_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_i: ClangBuiltin<"__builtin_xtensa_ae_s32x2f24_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2f24_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2f24_ric: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2f24_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2f24_x: ClangBuiltin<"__builtin_xtensa_ae_s32x2f24_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s64_i: ClangBuiltin<"__builtin_xtensa_ae_s64_i">, + Intrinsic<[], 
[llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s64_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s64_x: ClangBuiltin<"__builtin_xtensa_ae_s64_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s64_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s64_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_sa16x4_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa64neg_fp: + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa64pos_fp: + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_salign64_i: ClangBuiltin<"__builtin_xtensa_ae_salign64_i">, + Intrinsic<[], [llvm_v8i8_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_sat16x4: ClangBuiltin<"__builtin_xtensa_ae_sat16x4">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sat24s: ClangBuiltin<"__builtin_xtensa_ae_sat24s">, + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sat48s: ClangBuiltin<"__builtin_xtensa_ae_sat48s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_satq56s: ClangBuiltin<"__builtin_xtensa_ae_satq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbi: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sbi_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sbi_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sel16i: ClangBuiltin<"__builtin_xtensa_ae_sel16i">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sel16i_n: ClangBuiltin<"__builtin_xtensa_ae_sel16i_n">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sext32: ClangBuiltin<"__builtin_xtensa_ae_sext32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sext32x2d16_10: ClangBuiltin<"__builtin_xtensa_ae_sext32x2d16_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sext32x2d16_32: ClangBuiltin<"__builtin_xtensa_ae_sext32x2d16_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sha32: ClangBuiltin<"__builtin_xtensa_ae_sha32">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_shortswap: ClangBuiltin<"__builtin_xtensa_ae_shortswap">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa16s: ClangBuiltin<"__builtin_xtensa_ae_slaa16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa32: ClangBuiltin<"__builtin_xtensa_ae_slaa32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa32s: ClangBuiltin<"__builtin_xtensa_ae_slaa32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa64: ClangBuiltin<"__builtin_xtensa_ae_slaa64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa64s: ClangBuiltin<"__builtin_xtensa_ae_slaa64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaaq56: ClangBuiltin<"__builtin_xtensa_ae_slaaq56">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slai16s: ClangBuiltin<"__builtin_xtensa_ae_slai16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai24: ClangBuiltin<"__builtin_xtensa_ae_slai24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai24s: 
ClangBuiltin<"__builtin_xtensa_ae_slai24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai32: ClangBuiltin<"__builtin_xtensa_ae_slai32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai32s: ClangBuiltin<"__builtin_xtensa_ae_slai32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai64: ClangBuiltin<"__builtin_xtensa_ae_slai64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai64s: ClangBuiltin<"__builtin_xtensa_ae_slai64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slaisq56s: ClangBuiltin<"__builtin_xtensa_ae_slaisq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slas24: ClangBuiltin<"__builtin_xtensa_ae_slas24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas24s: ClangBuiltin<"__builtin_xtensa_ae_slas24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas32: ClangBuiltin<"__builtin_xtensa_ae_slas32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas32s: ClangBuiltin<"__builtin_xtensa_ae_slas32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas64: ClangBuiltin<"__builtin_xtensa_ae_slas64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas64s: ClangBuiltin<"__builtin_xtensa_ae_slas64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slasq56: ClangBuiltin<"__builtin_xtensa_ae_slasq56">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slassq56s: ClangBuiltin<"__builtin_xtensa_ae_slassq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sra64_32: ClangBuiltin<"__builtin_xtensa_ae_sra64_32">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa16rs: ClangBuiltin<"__builtin_xtensa_ae_sraa16rs">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa16s: ClangBuiltin<"__builtin_xtensa_ae_sraa16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32: ClangBuiltin<"__builtin_xtensa_ae_sraa32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32rs: ClangBuiltin<"__builtin_xtensa_ae_sraa32rs">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32s: ClangBuiltin<"__builtin_xtensa_ae_sraa32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa64: ClangBuiltin<"__builtin_xtensa_ae_sraa64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srai16: ClangBuiltin<"__builtin_xtensa_ae_srai16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai16r: ClangBuiltin<"__builtin_xtensa_ae_srai16r">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai24: ClangBuiltin<"__builtin_xtensa_ae_srai24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai32: 
ClangBuiltin<"__builtin_xtensa_ae_srai32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai32r: ClangBuiltin<"__builtin_xtensa_ae_srai32r">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai64: ClangBuiltin<"__builtin_xtensa_ae_srai64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sras24: ClangBuiltin<"__builtin_xtensa_ae_sras24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sras32: ClangBuiltin<"__builtin_xtensa_ae_sras32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sras64: ClangBuiltin<"__builtin_xtensa_ae_sras64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_srla32: ClangBuiltin<"__builtin_xtensa_ae_srla32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srla64: ClangBuiltin<"__builtin_xtensa_ae_srla64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srli24: ClangBuiltin<"__builtin_xtensa_ae_srli24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srli32: ClangBuiltin<"__builtin_xtensa_ae_srli32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srli64: ClangBuiltin<"__builtin_xtensa_ae_srli64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srls24: ClangBuiltin<"__builtin_xtensa_ae_srls24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srls32: ClangBuiltin<"__builtin_xtensa_ae_srls32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srls64: ClangBuiltin<"__builtin_xtensa_ae_srls64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub16: ClangBuiltin<"__builtin_xtensa_ae_sub16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub16s: ClangBuiltin<"__builtin_xtensa_ae_sub16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub24s: ClangBuiltin<"__builtin_xtensa_ae_sub24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub32: ClangBuiltin<"__builtin_xtensa_ae_sub32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub32s: ClangBuiltin<"__builtin_xtensa_ae_sub32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub64: ClangBuiltin<"__builtin_xtensa_ae_sub64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub64s: ClangBuiltin<"__builtin_xtensa_ae_sub64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_subadd32: ClangBuiltin<"__builtin_xtensa_ae_subadd32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_subadd32s: ClangBuiltin<"__builtin_xtensa_ae_subadd32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunca32f64s_l: ClangBuiltin<"__builtin_xtensa_ae_trunca32f64s_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunca32x2f64s: 
ClangBuiltin<"__builtin_xtensa_ae_trunca32x2f64s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunci32f64s_l: ClangBuiltin<"__builtin_xtensa_ae_trunci32f64s_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_trunci32x2f64s: ClangBuiltin<"__builtin_xtensa_ae_trunci32x2f64s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_vldl16c: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16c_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16c_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl32t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldsht: ClangBuiltin<"__builtin_xtensa_ae_vldsht">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_ae_vlel16t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_i32_ty, llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vlel32t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_i32_ty, llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_xor: ClangBuiltin<"__builtin_xtensa_ae_xor">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_zalign64: ClangBuiltin<"__builtin_xtensa_ae_zalign64">, + Intrinsic<[llvm_v8i8_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bithead: ClangBuiltin<"__builtin_xtensa_rur_ae_bithead">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bitptr: ClangBuiltin<"__builtin_xtensa_rur_ae_bitptr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bitsused: ClangBuiltin<"__builtin_xtensa_rur_ae_bitsused">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cbegin0: ClangBuiltin<"__builtin_xtensa_rur_ae_cbegin0">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cend0: ClangBuiltin<"__builtin_xtensa_rur_ae_cend0">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cw_sd_no: ClangBuiltin<"__builtin_xtensa_rur_ae_cw_sd_no">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cwrap: ClangBuiltin<"__builtin_xtensa_rur_ae_cwrap">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_first_ts: ClangBuiltin<"__builtin_xtensa_rur_ae_first_ts">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_nextoffset: ClangBuiltin<"__builtin_xtensa_rur_ae_nextoffset">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_overflow: ClangBuiltin<"__builtin_xtensa_rur_ae_overflow">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_ovf_sar: ClangBuiltin<"__builtin_xtensa_rur_ae_ovf_sar">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_sar: ClangBuiltin<"__builtin_xtensa_rur_ae_sar">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_searchdone: ClangBuiltin<"__builtin_xtensa_rur_ae_searchdone">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_tablesize: 
ClangBuiltin<"__builtin_xtensa_rur_ae_tablesize">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_ts_fts_bu_bp: ClangBuiltin<"__builtin_xtensa_rur_ae_ts_fts_bu_bp">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_wur_ae_bithead: ClangBuiltin<"__builtin_xtensa_wur_ae_bithead">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_bitptr: ClangBuiltin<"__builtin_xtensa_wur_ae_bitptr">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_bitsused: ClangBuiltin<"__builtin_xtensa_wur_ae_bitsused">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cbegin0: ClangBuiltin<"__builtin_xtensa_wur_ae_cbegin0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cend0: ClangBuiltin<"__builtin_xtensa_wur_ae_cend0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cw_sd_no: ClangBuiltin<"__builtin_xtensa_wur_ae_cw_sd_no">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cwrap: ClangBuiltin<"__builtin_xtensa_wur_ae_cwrap">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_first_ts: ClangBuiltin<"__builtin_xtensa_wur_ae_first_ts">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_nextoffset: ClangBuiltin<"__builtin_xtensa_wur_ae_nextoffset">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_overflow: ClangBuiltin<"__builtin_xtensa_wur_ae_overflow">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_ovf_sar: ClangBuiltin<"__builtin_xtensa_wur_ae_ovf_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_sar: ClangBuiltin<"__builtin_xtensa_wur_ae_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_searchdone: ClangBuiltin<"__builtin_xtensa_wur_ae_searchdone">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_tablesize: ClangBuiltin<"__builtin_xtensa_wur_ae_tablesize">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_ts_fts_bu_bp: ClangBuiltin<"__builtin_xtensa_wur_ae_ts_fts_bu_bp">, + Intrinsic<[], [llvm_i32_ty], []>; + diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def index b765b015c1265..edc178ac559f5 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.def +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -45,6 +45,7 @@ XTENSA_FEATURE(FK_REGPROTECT, "regprotect") XTENSA_FEATURE(FK_MISCSR, "miscsr") XTENSA_FEATURE(FK_ESP32S2OPS, "esp32s2") XTENSA_FEATURE(FK_ESP32S3OPS, "esp32s3") +XTENSA_FEATURE(FK_HIFI3, "hifi3") #undef XTENSA_FEATURE @@ -73,6 +74,14 @@ XTENSA_CPU(ESP32S3, {"esp32s3"}, FK_COPROCESSOR | FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S3OPS)) +XTENSA_CPU(CNL, {"cnl"}, (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | + FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_S32C1I | + FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | + FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | + FK_HIFI3)) + + #undef XTENSA_CPU #ifndef XTENSA_CPU_ALIAS diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.h b/llvm/include/llvm/TargetParser/XtensaTargetParser.h index d4e639005a5a2..8e1c55c6f9e21 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.h +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.h @@ -59,7 +59,8 @@ enum FeatureKind : uint64_t { FK_REGPROTECT = 1 << 28, FK_MISCSR = 1 << 29, FK_ESP32S2OPS = 1 << 30, - FK_ESP32S3OPS 
= 1ULL << 31 + FK_ESP32S3OPS = 1ULL << 31, + FK_HIFI3 = 1ULL << 32 }; CPUKind parseCPUKind(StringRef CPU); diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index d4f705abde5c2..72c687ba025ad 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -256,6 +256,10 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isImm() const override { return Kind == Immediate; } bool isMem() const override { return false; } + template <int64_t Lo, int64_t Hi> bool isImmInRange() const { + return Kind == Immediate && inRange(getImm(), Lo, Hi); + } + bool isImm(int64_t MinValue, int64_t MaxValue) const { return Kind == Immediate && inRange(getImm(), MinValue, MaxValue); } diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 93a133be9dff8..10e957be5f381 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -34,6 +34,7 @@ add_llvm_target(XtensaCodeGen XtensaTargetMachine.cpp XtensaTargetObjectFile.cpp XtensaTargetTransformInfo.cpp + XtensaBRegFixupPass.cpp LINK_COMPONENTS AsmPrinter diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 8ea0991b5040e..64ce0e4ab9b4b 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -44,6 +44,11 @@ class XtensaDisassembler : public MCDisassembler { bool hasESP32S3Ops() const { return STI.getFeatureBits()[Xtensa::FeatureESP32S3Ops]; + + } + + bool hasHIFI3() const { + return STI.getFeatureBits()[Xtensa::FeatureHIFI3]; } DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, @@ -68,6 +73,38 @@ static const unsigned ARDecoderTable[] = { Xtensa::A6, Xtensa::A7, Xtensa::A8, Xtensa::A9, Xtensa::A10, Xtensa::A11, Xtensa::A12, Xtensa::A13, Xtensa::A14, Xtensa::A15}; +static const unsigned AE_DRDecoderTable[] = { + Xtensa::AED0, Xtensa::AED1, Xtensa::AED2, Xtensa::AED3, + Xtensa::AED4, Xtensa::AED5, Xtensa::AED6, Xtensa::AED7, + Xtensa::AED8, Xtensa::AED9, Xtensa::AED10, Xtensa::AED11, + Xtensa::AED12, Xtensa::AED13, Xtensa::AED14, Xtensa::AED15}; + +static const unsigned AE_VALIGNDecoderTable[] = {Xtensa::U0, Xtensa::U1, + Xtensa::U2, Xtensa::U3}; + + +static DecodeStatus DecodeAE_DRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(AE_DRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = AE_DRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeAE_VALIGNRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(AE_VALIGNDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = AE_VALIGNDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { @@ -115,6 +152,40 @@ static const unsigned BRDecoderTable[] = { Xtensa::B6, Xtensa::B7, Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; +static const unsigned BR2DecoderTable[] = { + Xtensa::B0_B1, Xtensa::B2_B3, Xtensa::B4_B5, Xtensa::B6_B7, + Xtensa::B8_B9, Xtensa::B10_B11, Xtensa::B12_B13, Xtensa::B14_B15}; + + +static const
unsigned BR4DecoderTable[] = { + Xtensa::B0_B1_B2_B3, Xtensa::B4_B5_B6_B7, + Xtensa::B8_B9_B10_B11, Xtensa::B12_B13_B14_B15}; + + +static DecodeStatus DecodeXtensaRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder, + ArrayRef<unsigned> DecoderTable) { + if (RegNo >= DecoderTable.size()) + return MCDisassembler::Fail; + + unsigned Reg = DecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeBR2RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + return DecodeXtensaRegisterClass(Inst, RegNo, Address, Decoder, ArrayRef(BR2DecoderTable)); +} + +static DecodeStatus DecodeBR4RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + return DecodeXtensaRegisterClass(Inst, RegNo, Address, Decoder, ArrayRef(BR4DecoderTable)); +} + static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { @@ -564,6 +635,14 @@ static DecodeStatus decodeImm64n_4nOperand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeOffset8m32Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm << 2)); + return MCDisassembler::Success; +} + static DecodeStatus decodeEntry_Imm12OpValue(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { @@ -758,7 +837,7 @@ static DecodeStatus decodeMem32nOperand(MCInst &Inst, uint64_t Imm, /// Read two bytes from the ArrayRef and return 16 bit data sorted /// according to the given endianness. static DecodeStatus readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 2 Bytes of data. if (Bytes.size() < 2) { @@ -777,7 +856,7 @@ static DecodeStatus readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address, /// Read three bytes from the ArrayRef and return 24 bit data static DecodeStatus readInstruction24(ArrayRef<uint8_t> Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 3 Bytes of data. if (Bytes.size() < 3) { @@ -796,7 +875,7 @@ static DecodeStatus readInstruction24(ArrayRef<uint8_t> Bytes, uint64_t Address, /// Read three bytes from the ArrayRef and return 32 bit data static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 4 Bytes of data. if (Bytes.size() < 4) { @@ -813,13 +892,37 @@ static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address, return MCDisassembler::Success; } +/// Read InstSize bytes from the ArrayRef and return the instruction data +static DecodeStatus readInstructionN(ArrayRef<uint8_t> Bytes, uint64_t Address, + unsigned InstSize, + uint64_t &Size, uint64_t &Insn, + bool IsLittleEndian) { + // We want to read exactly InstSize Bytes of data.
+ if (Bytes.size() < InstSize) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + report_fatal_error("Big-endian mode currently is not supported!"); + } else { + Insn = 0; + for (unsigned i = 0; i < InstSize; i++) + Insn |= (static_cast<uint64_t>(Bytes[i]) << (8 * i)); + } + + Size = InstSize; + return MCDisassembler::Success; +} + + #include "XtensaGenDisassemblerTables.inc" DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t Address, raw_ostream &CS) const { - uint32_t Insn; + uint64_t Insn; DecodeStatus Result; // Parse 16-bit instructions @@ -872,5 +975,20 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, } } + if (hasHIFI3()) { + LLVM_DEBUG(dbgs() << "Trying Xtensa HIFI3 24-bit instruction table :\n"); + Result = decodeInstruction(DecoderTableHIFI324, MI, Insn, Address, this, STI); + if (Result != MCDisassembler::Fail) + return Result; + + Result = readInstructionN(Bytes, Address, 6, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + + LLVM_DEBUG(dbgs() << "Trying Xtensa HIFI3 48-bit instruction table :\n"); + Result = decodeInstruction(DecoderTableHIFI348, MI, Insn, Address, this, STI); + if (Result != MCDisassembler::Fail) + return Result; + } return Result; } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 9224d0a98c14b..756554bcf09b9 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -16,7 +16,9 @@ #define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAINSTPRINTER_H #include "llvm/MC/MCInstPrinter.h" +#include "llvm/MC/MCInst.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/raw_ostream.h" namespace llvm { class MCOperand; @@ -82,6 +84,18 @@ class XtensaInstPrinter : public MCInstPrinter { void printOffset_128_2_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset_128_1_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset_64_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + + template <int64_t lo, int64_t hi, int64_t step> + void printImmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= lo && Value <= hi && ((Value % step) == 0)) && + "Invalid argument"); + O << Value; + } else { + printOperand(MI, OpNum, O); + } + } }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp index 4537369b017d0..1dc3fca572fe8 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp @@ -23,12 +23,11 @@ XtensaMCAsmInfo::XtensaMCAsmInfo(const Triple &TT) { PrivateGlobalPrefix = ".L"; CommentString = "#"; ZeroDirective = "\t.space\t"; - Data16bitsDirective = "\t.half\t"; + Data16bitsDirective = "\t.2byte\t"; Data32bitsDirective = "\t.word\t"; Data64bitsDirective = "\t.quad\t"; GlobalDirective = "\t.global\t"; UsesELFSectionDirectiveForBSS = true; SupportsDebugInformation = true; - ExceptionsType = ExceptionHandling::DwarfCFI; AlignmentIsInBytes = true; } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 185507a93c410..958dedbaaa2e5 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ 
b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -123,6 +123,10 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getOffset8m32OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const; @@ -518,6 +522,18 @@ XtensaMCCodeEmitter::getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, return Res & 0x3f; } +uint32_t +XtensaMCCodeEmitter::getOffset8m32OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t Res = static_cast<uint32_t>(MO.getImm()); + + assert(((Res & 0x3) == 0) && "Unexpected operand value!"); + + return Res; +} + uint32_t XtensaMCCodeEmitter::getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups, diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index 5f7c7292a51bc..4a7ff2e617c07 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -31,5 +31,6 @@ FunctionPass *createXtensaHardwareLoops(); FunctionPass *createXtensaFixupHwLoops(); FunctionPass *createXtensaPSRAMCacheFixPass(); FunctionPass *createXtensaConstantIslandPass(); +FunctionPass *createXtensaBRegFixupPass(); } // namespace llvm #endif /* LLVM_LIB_TARGET_XTENSA_XTENSA_H */ diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 7d03565751e6e..0bd57503d1010 100.644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -178,6 +178,11 @@ def FeatureESP32S3Ops : SubtargetFeature<"esp32s3", "HasESP32S3Ops", "tru def HasESP32S3Ops : Predicate<"Subtarget->hasESP32S3Ops()">, AssemblerPredicate<(all_of FeatureESP32S3Ops)>; +def FeatureHIFI3 : SubtargetFeature<"hifi3", "HasHIFI3", "true", + "Enable Xtensa HIFI3 instructions">; +def HasHIFI3 : Predicate<"Subtarget->hasHIFI3()">, + AssemblerPredicate<(all_of FeatureHIFI3)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors.
//===----------------------------------------------------------------------===// @@ -206,6 +211,13 @@ def : Proc<"esp32s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureM FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureMINMAX, FeatureCLAMPS, FeatureESP32S3Ops]>; +def : Proc<"cnl", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureWindowed, FeatureBoolean, + FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureS32C1I, + FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, + FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, + FeatureRegionProtection, FeatureMiscSR, FeatureHIFI3]>; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp new file mode 100644 index 0000000000000..0fcdebc1b8858 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp @@ -0,0 +1,229 @@ +//===- XtensaBRegFixup.cpp - Xtensa boolean register fixup ----------------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-breg-fixup-pass" + +class XtensaBRegFixup : public MachineFunctionPass { +public: + static char ID; + XtensaBRegFixup() : MachineFunctionPass(ID) {} + + const XtensaSubtarget *Subtarget; + static const XtensaInstrInfo *XtensaII; + + bool runOnMachineFunction(MachineFunction &MF) override; + + llvm::StringRef getPassName() const override { + return "Xtensa bool reg fixup pass"; + } + +private: + bool VisitMBB(MachineBasicBlock &MBB); + bool VisitInstruction(const MachineBasicBlock::instr_iterator &MII); +}; + +char XtensaBRegFixup::ID = 0; +const XtensaInstrInfo *XtensaBRegFixup::XtensaII; + +bool XtensaBRegFixup::runOnMachineFunction(MachineFunction &MF) { + + Subtarget = &static_cast<const XtensaSubtarget &>(MF.getSubtarget()); + XtensaII = static_cast<const XtensaInstrInfo *>(Subtarget->getInstrInfo()); + bool Modified = false; + + MachineFunction::iterator I = MF.begin(), E = MF.end(); + + LLVM_DEBUG(dbgs() << "********* " << getPassName() << " *********\n"); + + for (; I != E; ++I) + Modified |= VisitMBB(*I); + return Modified; +} + +FunctionPass *llvm::createXtensaBRegFixupPass() { + return new XtensaBRegFixup(); +} + +bool XtensaBRegFixup::VisitMBB(MachineBasicBlock &MBB) { + bool Modified = false; + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + E = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != E; MII = NextMII) { + NextMII = std::next(MII); + MachineInstr *MI = &*MII; + + // Don't reduce bundled instructions or pseudo
operations + if (MI->isBundle() || MI->isTransient()) + continue; + + Modified |= VisitInstruction(MII); + } + + return Modified; +} + +bool XtensaBRegFixup::VisitInstruction( + const MachineBasicBlock::instr_iterator &MII) { + MachineInstr *MI = &*MII; + MachineBasicBlock &MBB = *MI->getParent(); + MachineFunction *MF = MBB.getParent(); + const XtensaInstrInfo &TII = + *static_cast(MF->getSubtarget().getInstrInfo()); + unsigned Opcode = MI->getOpcode(); + + switch (Opcode) { + case Xtensa::MOVBA2_P2: + case Xtensa::MOVBA_P2: { + + unsigned RegBase; + unsigned Arity; + + switch (Opcode) { + case Xtensa::MOVBA_P2: + RegBase = Xtensa::B0; + Arity = 1; + break; + case Xtensa::MOVBA2_P2: + RegBase = Xtensa::B0_B1; + Arity = 2; + break; + default: + llvm_unreachable("Unknown MOVBA opcode"); + } + /* + MOVBA_P2 Breg, Dst1, Dst2, Src + | + V + RSR Dst1, BREG + LoadImmediate Dst2, BregMask + AND Dst2, Dst2, Dst1 + SLLI Dst1, Src, BregShift + OR Dst2, Dst2, Dst1 + WSR BREG, Dst2 + */ + // TODO: Mask SRC, e.g. by EXTUI + MachineOperand Breg = MI->getOperand(0); + MachineOperand Dst1 = MI->getOperand(1); + MachineOperand Dst2 = MI->getOperand(2); + MachineOperand Src = MI->getOperand(3); + DebugLoc DL = MI->getDebugLoc(); + unsigned RegNo = Breg.getReg().id() - RegBase; + + int64_t BaseMask = (1 << Arity) - 1; + int64_t Mask = 0xffff & (~(BaseMask << (RegNo * Arity))); + + MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Xtensa::RSR)).add(Dst1).addReg(Xtensa::BREG); + + TII.buildLoadImmediate(MBB, MI, Dst2.getReg(), Mask); + + BuildMI(MBB, MI, DL, TII.get(Xtensa::AND)).add(Dst2).add(Dst2).add(Dst1); + + if (RegNo > 0) { + BuildMI(MBB, MI, DL, TII.get(Xtensa::SLLI)) + .add(Dst1) + .add(Src) + .addImm(RegNo); + } else { + Dst1.setReg(Src.getReg()); + } + + BuildMI(MBB, MI, DL, TII.get(Xtensa::OR)).add(Dst2).add(Dst2).add(Dst1); + + BuildMI(MBB, MI, DL, TII.get(Xtensa::WSR)).addReg(Xtensa::BREG).add(Dst2); + + LLVM_DEBUG(dbgs() << " Fixed MOVBA_P2: " << *MIB); + MBB.erase_instr(MI); + return true; + } break; + case Xtensa::EXTUI_BR_P: { + case Xtensa::EXTUI_BR2_P: + case Xtensa::EXTUI_BR4_P: + unsigned RegBase; + unsigned Arity; + + switch (Opcode) { + case Xtensa::EXTUI_BR_P: + RegBase = Xtensa::B0; + Arity = 1; + break; + case Xtensa::EXTUI_BR2_P: + RegBase = Xtensa::B0_B1; + Arity = 2; + break; + case Xtensa::EXTUI_BR4_P: + RegBase = Xtensa::B0_B1_B2_B3; + Arity = 4; + break; + default: + llvm_unreachable("Unknown EXTUI opcode"); + } + + MachineOperand Breg = MI->getOperand(2); + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::EXTUI); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); + unsigned RegNo = Breg.getReg().id() - RegBase; + MIB.addImm(RegNo * Arity); + MIB.addImm(Arity); + + LLVM_DEBUG(dbgs() << " Fixed EXTUI: " << *MIB); + MBB.erase_instr(MI); + return true; + + } break; + + case Xtensa::SLLI_BR_P: { + + unsigned RegBase = Xtensa::B0; + MachineOperand Breg = MI->getOperand(2); + unsigned RegNo = Breg.getReg().id() - RegBase; + if (RegNo != 0) { + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::SLLI); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. 
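// For reference, with illustrative register numbers (not taken from the patch):
// the pseudos expanded in this switch map a boolean-register index directly to
// shift/extract immediates. EXTUI_BR2_P %a2, %a3, %b2_b3 becomes
// EXTUI %a2, %a3, 2, 2 (RegNo = 1, Arity = 2); SLLI_BR_P %a2, %a3, %b5 becomes
// SLLI %a2, %a3, 5; and with %b0 the zero shift is a no-op, so the pseudo
// below is simply deleted.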
+ MIB.setMIFlags(MI->getFlags()); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); + MIB.addImm(RegNo); + + LLVM_DEBUG(dbgs() << " Fixed SLLI: " << *MIB); + } else { + LLVM_DEBUG(dbgs() << " Fixed SLLI: SLLI 0 => NOP"); + } + MBB.erase_instr(MI); + return true; + + } break; + default: + break; + } + + return false; +} \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index adfb8656b32d3..56c2fb1aa1d2b 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -14,17 +14,26 @@ class CCIfAlign : CCIf; +class CCIfFeature: + CCIf().has", Feature, "()"), A>; + //===----------------------------------------------------------------------===// // Xtensa return value calling convention //===----------------------------------------------------------------------===// def RetCC_Xtensa : CallingConv<[ CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfFeature<"Boolean",CCIfType<[v1i1], CCAssignToReg<[B0]>>>, + CCIfFeature<"Boolean",CCIfType<[v2i1], CCAssignToReg<[B0_B1]>>>, + CCIfFeature<"Boolean",CCIfType<[v4i1], CCAssignToReg<[B0_B1_B2_B3]>>>, CCIfType<[f32], CCBitConvertToType>, //First two return values go in a2, a3, a4, a5 CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, CCIfType<[f32], CCAssignToReg<[A2, A3, A4, A5]>>, - CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>> + CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>>, + CCIfFeature<"HIFI3", + CCIfType<[v4i16, v2i32, v1i64, v1i32], + CCAssignToReg<[AED0, AED1, AED2, AED3]>>> ]>; //===----------------------------------------------------------------------===// @@ -39,10 +48,14 @@ def CSRWE_Xtensa : CalleeSavedRegs<(add)> { def RetCCW_Xtensa : CallingConv<[ CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfFeature<"Boolean",CCIfType<[v1i1], CCAssignToReg<[B0]>>>, CCIfType<[f32], CCBitConvertToType>, //First two return values go in a10, a11, a12, a13 CCIfType<[i32], CCAssignToReg<[A10, A11, A12, A13]>>, CCIfType<[f32], CCAssignToReg<[A10, A11, A12, A13]>>, - CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>> + CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>>, + CCIfFeature<"HIFI3", + CCIfType<[v4i16, v2i32, v1i64, v1i32], + CCAssignToReg<[AED0, AED1, AED2, AED3]>>> ]>; diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index 9c67b0d419571..f1db383620b37 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -375,15 +375,32 @@ void XtensaFrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { const XtensaSubtarget &STI = MF.getSubtarget(); + // Presence of SPILL_* pseudo-instructions requires spill slots + int NeedRegs = 0; + for (const MachineBasicBlock &MBB : MF) { + for (const MachineInstr &MI : MBB) { + unsigned Opcode = MI.getOpcode(); + if (Opcode == Xtensa::SPILL_BOOL) + NeedRegs += 1; + + if (Opcode == Xtensa::RESTORE_BOOL) + NeedRegs += 3; + } + } + NeedRegs = std::min(16, NeedRegs); + // In WinABI mode add register scavenging slot // FIXME: It may be posssible to add spill slot by more optimal way if (STI.isWinABI() && - (MF.getFrameInfo().estimateStackSize(MF) > STACK_SIZE_THRESHOLD)) { + ((MF.getFrameInfo().estimateStackSize(MF) > STACK_SIZE_THRESHOLD) || + (NeedRegs > 0))) { MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterClass &RC = Xtensa::ARRegClass; const 
TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); unsigned Size = TRI.getSpillSize(RC); Align Alignment = TRI.getSpillAlign(RC); - RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false)); + for (int i = 0; i < NeedRegs; i++) + RS->addScavengingFrameIndex( + MFI.CreateStackObject(Size, Alignment, false)); } } diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td new file mode 100644 index 0000000000000..f06847a0da7dd --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td @@ -0,0 +1,48 @@ +//===- XtensaHIFIInstrFormats.td - Instruction formats for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen instruction formats for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +// Base class for Xtensa 64 bit Format +class XtensaInst64 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<8, outs, ins, asmstr, pattern, itin> +{ + bits<64> Inst = 0; + bits<64> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaInst48 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<6, outs, ins, asmstr, pattern, itin> +{ + bits<48> Inst = 0; + bits<48> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaInst88 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<11, outs, ins, asmstr, pattern, itin> +{ + bits<88> Inst = 0; + bits<88> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaAEInst24 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst24 { + let DecoderNamespace = "HIFI3"; + let Inst = 0; +} \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td new file mode 100644 index 0000000000000..de373ede0efe1 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td @@ -0,0 +1,34720 @@ +//===- XtensaHIFIInstrInfo.td - Instruction definitions for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen instruction definitiona for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +class AE_ABS16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs16s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS16S : AE_ABS16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs16s AE_DR:$ae_arth_v1))]>; + +class AE_ABS24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs24s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS24S : AE_ABS24S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs24s AE_DR:$ae_arth_v1))]>; + +class AE_ABS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs32 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{25} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ABS32 : AE_ABS32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs32 AE_DR:$ae_arth_v1))]>; + +class AE_ABS32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs32s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS32S : AE_ABS32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs32s AE_DR:$ae_arth_v1))]>; + +class AE_ABS64_X24 pattern> + : 
XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs64 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS64 : AE_ABS64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs64 AE_DR:$ae_arth_v1))]>; + +class AE_ABS64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs64s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{24} = 1; +let Inst{26} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ABS64S : AE_ABS64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs64s AE_DR:$ae_arth_v1))]>; + +class AE_ADD16_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add16 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD16 : AE_ADD16_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add16 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add16s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD16S : AE_ADD16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add16s 
AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add24s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD24S : AE_ADD24S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add24s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD32 : AE_ADD32_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD32_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32_hl_lh $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD32_HL_LH : AE_ADD32_HL_LH_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32_hl_lh AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; 
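// (Field layout shared by the X24 arithmetic forms in this file: the result
// register index sits in Inst{15-12}, the first source in Inst{11-8}, and the
// second source in Inst{7-4}; each AE_DR operand is a 4-bit register number.)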
+let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD32S : AE_ADD32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add64 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD64 : AE_ADD64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add64 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add64s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD64S : AE_ADD64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add64s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADDBRBA32_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$arr), (ins AR:$art, AR:$ars), "ae_addbrba32 $arr, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = arr{0}; +let Inst{21} = arr{1}; +let Inst{22} = arr{2}; +let Inst{23} = arr{3}; +let Inst{24} = art{0}; +let Inst{25} = art{1}; +let Inst{26} = art{2}; +let Inst{27} = art{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_ADDBRBA32 : AE_ADDBRBA32_AE_FORMAT48<[(set AR:$arr, (int_xtensa_ae_addbrba32 AR:$art, AR:$ars))]>; + +class AE_ADDSUB32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_addsub32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let 
Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADDSUB32 : AE_ADDSUB32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_addsub32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADDSUB32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_addsub32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADDSUB32S : AE_ADDSUB32S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_addsub32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_AND_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_and $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_AND : AE_AND_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_and AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_CVT32X2F16_10_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_cvt32x2f16.10 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_CVT32X2F16_10 : AE_CVT32X2F16_10_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_cvt32x2f16_10 AE_DR:$ae_to_dr_v0))]>; + +class AE_CVT32X2F16_32_AE_FORMAT 
pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_cvt32x2f16.32 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_CVT32X2F16_32 : AE_CVT32X2F16_32_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_cvt32x2f16_32 AE_DR:$ae_to_dr_v0))]>; + +class AE_CVT48A32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvt48a32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_CVT48A32 : AE_CVT48A32_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvt48a32 AR:$ars))]>; + +class AE_CVT64A32_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvt64a32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{25} = 1; +let Inst{27} = 1; +let Inst{38} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = ae_ar_to_dr_v{0}; +let Inst{21} = ae_ar_to_dr_v{1}; +let Inst{22} = ae_ar_to_dr_v{2}; +let Inst{23} = ae_ar_to_dr_v{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_CVT64A32 : AE_CVT64A32_AE_FORMAT48<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvt64a32 AR:$ars))]>; + +class AE_CVT64F32_H_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvt64f32.h $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_dr_to_dr_v{0}; +let Inst{21} = ae_dr_to_dr_v{1}; +let Inst{22} = ae_dr_to_dr_v{2}; +let Inst{23} = ae_dr_to_dr_v{3}; +let Inst{36} = ae_dr_to_dr_v0{0}; +let Inst{37} = ae_dr_to_dr_v0{1}; +let Inst{38} = ae_dr_to_dr_v0{2}; +let Inst{39} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVT64F32_H : AE_CVT64F32_H_AE_FORMAT1<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvt64f32_h AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_CVTA32F24S_H_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_cvta32f24s.h $arr, $ae_dr_to_ar_v0", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_CVTA32F24S_H : AE_CVTA32F24S_H_AE_FORMAT<[(set AR:$arr, (int_xtensa_ae_cvta32f24s_h AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_CVTA32F24S_L_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_cvta32f24s.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_CVTA32F24S_L : AE_CVTA32F24S_L_AE_FORMAT<[(set AR:$arr, (int_xtensa_ae_cvta32f24s_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_CVTQ56A32S_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvtq56a32s $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{24} = 1; +let Inst{25} = 1; +let Inst{27} = 1; +let Inst{38} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = ae_ar_to_dr_v{0}; +let Inst{21} = ae_ar_to_dr_v{1}; +let Inst{22} = ae_ar_to_dr_v{2}; +let Inst{23} = ae_ar_to_dr_v{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_CVTQ56A32S : AE_CVTQ56A32S_AE_FORMAT48<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvtq56a32s AR:$ars))]>; + +class AE_CVTQ56P32S_H_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvtq56p32s.h $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVTQ56P32S_H : AE_CVTQ56P32S_H_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvtq56p32s_h AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_CVTQ56P32S_L_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvtq56p32s.l $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; + + +let 
DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVTQ56P32S_L : AE_CVTQ56P32S_L_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvtq56p32s_l AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_DB_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB : AE_DB_X24<[(set AR:$ars_out, (int_xtensa_ae_db AR:$ars, AR:$art))]>; + +class AE_DB_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db.ic $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB_IC : AE_DB_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_db_ic AR:$ars, AR:$art))]>; + +class AE_DB_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db.ip $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB_IP : AE_DB_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_db_ip AR:$ars, AR:$art))]>; + +class AE_DBI_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = 
ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI : AE_DBI_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi AR:$ars, timm:$ae_ohba))]>; + +class AE_DBI_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi.ic $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI_IC : AE_DBI_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi_ic AR:$ars, timm:$ae_ohba))]>; + +class AE_DBI_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi.ip $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI_IP : AE_DBI_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi_ip AR:$ars, timm:$ae_ohba))]>; + +class AE_DIV64D32_H_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v_out), (ins AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1), "ae_div64d32.h $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +let Constraints = "$ae_arth_v = $ae_arth_v_out,@earlyclobber $ae_arth_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_DIV64D32_H : AE_DIV64D32_H_AE_FORMAT1<[(set AE_DR:$ae_arth_v_out, (int_xtensa_ae_div64d32_h AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1))]>; + +class AE_DIV64D32_L_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v_out), (ins AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1), "ae_div64d32.l $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +let Constraints = "$ae_arth_v = $ae_arth_v_out,@earlyclobber $ae_arth_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = 
ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_DIV64D32_L : AE_DIV64D32_L_X24<[(set AE_DR:$ae_arth_v_out, (int_xtensa_ae_div64d32_l AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1))]>; + +class AE_EQ16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_EQ16 : AE_EQ16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_eq16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_EQ32_X24 pattern> + : XtensaAEInst24<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{13} = br2{0}; +let Inst{14} = br2{1}; +let Inst{15} = br2{2}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_EQ32 : AE_EQ32_X24<[(set BR2:$br2, (int_xtensa_ae_eq32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_EQ64_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = br{0}; +let Inst{21} = br{1}; +let Inst{22} = br{2}; +let Inst{23} = br{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_EQ64 : AE_EQ64_AE_FORMAT1<[(set BR:$br, (int_xtensa_ae_eq64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_L16_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let 
Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16_I : AE_L16_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16_i AR:$ars, timm:$ae_immls16))]>; + +class AE_L16_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16.ip $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16_IP : AE_L16_IP_X24<[]>; + +class AE_L16_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_X : AE_L16_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16_x AR:$ars, AR:$art))]>; + +class AE_L16_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_XC : AE_L16_XC_AE_FORMAT48<[]>; + +class AE_L16_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let 
Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_XP : AE_L16_XP_AE_FORMAT48<[]>; + +class AE_L16M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16m.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16M_I : AE_L16M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16m_i AR:$ars, timm:$ae_immls16))]>; + +class AE_L16M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16m.iu $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16M_IU : AE_L16M_IU_X24<[]>; + +class AE_L16M_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_X : AE_L16M_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16m_x AR:$ars, AR:$art))]>; + +class AE_L16M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let 
Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_XC : AE_L16M_XC_AE_FORMAT48<[]>; + +class AE_L16M_XU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_XU : AE_L16M_XU_X24<[]>; + +class AE_L16X2M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l16x2m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L16X2M_I : AE_L16X2M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x2m_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L16X2M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l16x2m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L16X2M_IU : AE_L16X2M_IU_X24<[]>; + +class AE_L16X2M_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16x2m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let 
Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = art{0};
+let Inst{5} = art{1};
+let Inst{6} = art{2};
+let Inst{7} = art{3};
+}
+
+
+
+def AE_L16X2M_X : AE_L16X2M_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x2m_x AR:$ars, AR:$art))]>;
+
+class AE_L16X2M_XC_AE_FORMAT48<list<dag> pattern>
+ : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x2m.xc $ae_ls_v, $ars, $art", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<4> art;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//opcode
+let Inst{28} = 1;
+let Inst{29} = 1;
+let Inst{30} = 1;
+let Inst{31} = 1;
+let Inst{32} = 1;
+let Inst{34} = 1;
+let Inst{35} = 1;
+let Inst{36} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = art{0};
+let Inst{5} = art{1};
+let Inst{6} = art{2};
+let Inst{7} = art{3};
+}
+
+
+
+def AE_L16X2M_XC : AE_L16X2M_XC_AE_FORMAT48<[]>;
+
+class AE_L16X2M_XU_X24<list<dag> pattern>
+ : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x2m.xu $ae_ls_v, $ars, $art", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<4> art;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+
+//opcode
+let Inst{2} = 1;
+let Inst{16} = 1;
+let Inst{17} = 1;
+let Inst{19} = 1;
+let Inst{20} = 1;
+let Inst{21} = 1;
+let Inst{23} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = art{0};
+let Inst{5} = art{1};
+let Inst{6} = art{2};
+let Inst{7} = art{3};
+}
+
+
+
+def AE_L16X2M_XU : AE_L16X2M_XU_X24<[]>;
+
+class AE_L16X4_I_X24<list<dag> pattern>
+ : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l16x4.i $ae_ls_v, $ars, $ae_immls64", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<4> ae_immls64;
+
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+
+//opcode
+let Inst{2} = 1;
+let Inst{18} = 1;
+let Inst{19} = 1;
+let Inst{22} = 1;
+let Inst{23} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = ae_immls64{0};
+let Inst{5} = ae_immls64{1};
+let Inst{6} = ae_immls64{2};
+let Inst{7} = ae_immls64{3};
+}
+
+
+
+def AE_L16X4_I : AE_L16X4_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x4_i AR:$ars, timm:$ae_immls64))]>;
+
+class AE_L16X4_IP_X24<list<dag> pattern>
+ : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l16x4.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<3> ae_immls64pos;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+
+//opcode
+let Inst{2} = 1;
+let Inst{7} = 1;
+let Inst{20} = 1;
+let Inst{23} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = ae_immls64pos{0};
+let Inst{5} = ae_immls64pos{1};
+let Inst{6} = ae_immls64pos{2};
+}
+
+
+
+def AE_L16X4_IP : AE_L16X4_IP_X24<[]>;
+
+class AE_L16X4_RIC_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l16x4.ric $ae_ls_v, $ars", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{5} = 1;
+let Inst{40} = 1;
+let Inst{43} = 1;
+let Inst{44} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+}
+
+
+
+def AE_L16X4_RIC : AE_L16X4_RIC_AE_FORMAT<[]>;
+
+class AE_L16X4_RIP_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l16x4.rip $ae_ls_v, $ars", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{5} = 1;
+let Inst{6} = 1;
+let Inst{40} = 1;
+let Inst{43} = 1;
+let Inst{44} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+}
+
+
+
+def AE_L16X4_RIP : AE_L16X4_RIP_AE_FORMAT<[]>;
+
+class AE_L16X4_X_AE_FORMAT48<list<dag> pattern>
+ : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16x4.x $ae_ls_v, $ars, $art", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<4> art;
+
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//opcode
+let Inst{28} = 1;
+let Inst{33} = 1;
+let Inst{34} = 1;
+let Inst{35} = 1;
+let Inst{36} = 1;
+//operands
+let Inst{12} = ae_ls_v{0};
+let Inst{13} = ae_ls_v{1};
+let Inst{14} = ae_ls_v{2};
+let Inst{15} = ae_ls_v{3};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = art{0};
+let Inst{5} = art{1};
+let Inst{6} = art{2};
+let Inst{7} = art{3};
+}
+
+
+
+def AE_L16X4_X : AE_L16X4_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x4_x AR:$ars, AR:$art))]>;
+
+class AE_L16X4_XC_AE_FORMAT48<list<dag> pattern>
+ : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x4.xc $ae_ls_v, $ars, $art", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_ls_v;
+bits<4> ars;
+bits<4> art;
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out";
+let mayLoad = 1;
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//opcode
+let Inst{29} = 1;
+let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X4_XC : AE_L16X4_XC_AE_FORMAT48<[]>; + +class AE_L16X4_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x4.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X4_XP : AE_L16X4_XP_X24<[]>; + +class AE_L32_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32_I : AE_L32_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32_IP : AE_L32_IP_X24<[]>; + +class AE_L32_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; 
+let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_X : AE_L32_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32_x AR:$ars, AR:$art))]>; + +class AE_L32_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_XC : AE_L32_XC_X24<[]>; + +class AE_L32_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_XP : AE_L32_XP_AE_FORMAT48<[]>; + +class AE_L32F24_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32f24.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32F24_I : AE_L32F24_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32f24_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32f24.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let 
DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32F24_IP : AE_L32F24_IP_X24<[]>; + +class AE_L32F24_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_X : AE_L32F24_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32f24_x AR:$ars, AR:$art))]>; + +class AE_L32F24_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_XC : AE_L32F24_XC_X24<[]>; + +class AE_L32F24_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_XP : AE_L32F24_XP_AE_FORMAT48<[]>; + +class AE_L32M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32M_I : AE_L32M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32m_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32M_IU : AE_L32M_IU_X24<[]>; + +class AE_L32M_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_X : AE_L32M_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32m_x AR:$ars, AR:$art))]>; + +class AE_L32M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_XC : AE_L32M_XC_AE_FORMAT48<[]>; + +class AE_L32M_XU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl 
+bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_XU : AE_L32M_XU_X24<[]>; + +class AE_L32X2_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l32x2.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L32X2_I : AE_L32X2_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l32x2.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_L32X2_IP : AE_L32X2_IP_X24<[]>; + +class AE_L32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2_RIC : AE_L32X2_RIC_AE_FORMAT48<[]>; + +class AE_L32X2_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = 
$ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2_RIP : AE_L32X2_RIP_X24<[]>; + +class AE_L32X2_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32x2.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_X : AE_L32X2_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2_x AR:$ars, AR:$art))]>; + +class AE_L32X2_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_XC : AE_L32X2_XC_X24<[]>; + +class AE_L32X2_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_XP : AE_L32X2_XP_X24<[]>; + +class AE_L32X2F24_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l32x2f24.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; 
+let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L32X2F24_I : AE_L32X2F24_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2f24_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l32x2f24.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_L32X2F24_IP : AE_L32X2F24_IP_X24<[]>; + +class AE_L32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2f24.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2F24_RIC : AE_L32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_L32X2F24_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2f24.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2F24_RIP : AE_L32X2F24_RIP_X24<[]>; + +class AE_L32X2F24_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32x2f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = 
art{3}; +} + + + +def AE_L32X2F24_X : AE_L32X2F24_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2f24_x AR:$ars, AR:$art))]>; + +class AE_L32X2F24_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2F24_XC : AE_L32X2F24_XC_X24<[]>; + +class AE_L32X2F24_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2F24_XP : AE_L32X2F24_XP_X24<[]>; + +class AE_L64_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l64.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L64_I : AE_L64_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l64_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L64_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l64.ip $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = 
ae_immls64{3}; +} + + + +def AE_L64_IP : AE_L64_IP_X24<[]>; + +class AE_L64_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l64.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_X : AE_L64_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l64_x AR:$ars, AR:$art))]>; + +class AE_L64_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l64.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_XC : AE_L64_XC_AE_FORMAT48<[]>; + +class AE_L64_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l64.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_XP : AE_L64_XP_AE_FORMAT48<[]>; + +class AE_LA16X4_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} 
= 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_IC : AE_LA16X4_IC_X24<[]>; + +class AE_LA16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_IP : AE_LA16X4_IP_X24<[]>; + +class AE_LA16X4_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_RIC : AE_LA16X4_RIC_AE_FORMAT48<[]>; + +class AE_LA16X4_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_RIP : AE_LA16X4_RIP_X24<[]>; + +class AE_LA16X4NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la16x4neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = 
"$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4NEG_PC : AE_LA16X4NEG_PC_AE_FORMAT48<[]>; + +class AE_LA16X4POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la16x4pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4POS_PC : AE_LA16X4POS_PC_AE_FORMAT48<[]>; + +class AE_LA24_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_IC : AE_LA24_IC_AE_FORMAT48<[]>; + +class AE_LA24_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_IP : AE_LA24_IP_AE_FORMAT48<[]>; + +class AE_LA24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), 
"ae_la24.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_RIC : AE_LA24_RIC_AE_FORMAT48<[]>; + +class AE_LA24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_RIP : AE_LA24_RIP_AE_FORMAT48<[]>; + +class AE_LA24NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24NEG_PC : AE_LA24NEG_PC_AE_FORMAT48<[]>; + +class AE_LA24POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24POS_PC : AE_LA24POS_PC_AE_FORMAT48<[]>; + +class AE_LA24X2_IC_X24 pattern> + 
: XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_IC : AE_LA24X2_IC_X24<[]>; + +class AE_LA24X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_IP : AE_LA24X2_IP_X24<[]>; + +class AE_LA24X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_RIC : AE_LA24X2_RIC_AE_FORMAT48<[]>; + +class AE_LA24X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = 
ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_RIP : AE_LA24X2_RIP_AE_FORMAT48<[]>; + +class AE_LA24X2NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24x2neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2NEG_PC : AE_LA24X2NEG_PC_AE_FORMAT48<[]>; + +class AE_LA24X2POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24x2pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2POS_PC : AE_LA24X2POS_PC_AE_FORMAT48<[]>; + +class AE_LA32X2_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_IC : AE_LA32X2_IC_X24<[]>; + +class AE_LA32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; 
+let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_IP : AE_LA32X2_IP_X24<[]>; + +class AE_LA32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_RIC : AE_LA32X2_RIC_AE_FORMAT48<[]>; + +class AE_LA32X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_RIP : AE_LA32X2_RIP_AE_FORMAT48<[]>; + +class AE_LA32X2F24_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_IC : AE_LA32X2F24_IC_X24<[]>; + +class AE_LA32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber 
$ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_IP : AE_LA32X2F24_IP_X24<[]>; + +class AE_LA32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_RIC : AE_LA32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_LA32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_RIP : AE_LA32X2F24_RIP_AE_FORMAT48<[]>; + +class AE_LA32X2NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la32x2neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2NEG_PC : AE_LA32X2NEG_PC_AE_FORMAT48<[]>; + +class AE_LA32X2POS_PC_X24 pattern> + : XtensaAEInst24<(outs 
AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la32x2pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{12} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2POS_PC : AE_LA32X2POS_PC_X24<[]>; + +class AE_LA64_PP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_uu), (ins AR:$ars), "ae_la64.pp $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{13} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA64_PP : AE_LA64_PP_X24<[(set AE_VALIGN:$ae_ls_uu, (int_xtensa_ae_la64_pp AR:$ars))]>; + +class AE_LALIGN64_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_lalign64.i $ae_ls_uu, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{28} = ae_immls64{2}; +let Inst{29} = ae_immls64{3}; +} + + + +def AE_LALIGN64_I : AE_LALIGN64_I_AE_FORMAT48<[(set AE_VALIGN:$ae_ls_uu, (int_xtensa_ae_lalign64_i AR:$ars, timm:$ae_immls64))]>; + +class AE_LB_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$art), "ae_lb $arr, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LB : AE_LB_X24<[(set AR:$arr, (int_xtensa_ae_lb AR:$art))]>; + +class AE_LBI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins imm1_16:$ae_ohba), "ae_lbi $arr, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_ohba{0}; 
+let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBI : AE_LBI_X24<[(set AR:$arr, (int_xtensa_ae_lbi timm:$ae_ohba))]>; + +class AE_LBK_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars, AR:$art), "ae_lbk $arr, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LBK : AE_LBK_X24<[(set AR:$arr, (int_xtensa_ae_lbk AR:$ars, AR:$art))]>; + +class AE_LBKI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars, imm1_16:$ae_ohba), "ae_lbki $arr, $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBKI : AE_LBKI_X24<[(set AR:$arr, (int_xtensa_ae_lbki AR:$ars, timm:$ae_ohba))]>; + +class AE_LBS_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$art), "ae_lbs $arr, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LBS : AE_LBS_X24<[(set AR:$arr, (int_xtensa_ae_lbs AR:$art))]>; + +class AE_LBSI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins imm1_16:$ae_ohba), "ae_lbsi $arr, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBSI : AE_LBSI_X24<[(set AR:$arr, (int_xtensa_ae_lbsi timm:$ae_ohba))]>; + +class AE_LE16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_le16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let 
Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LE16 : AE_LE16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_le16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LE32_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_le32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{21} = br2{0}; +let Inst{22} = br2{1}; +let Inst{23} = br2{2}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LE32 : AE_LE32_AE_FORMAT1<[(set BR2:$br2, (int_xtensa_ae_le32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LE64_X24 pattern> + : XtensaAEInst24<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_le64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LE64 : AE_LE64_X24<[(set BR:$br, (int_xtensa_ae_le64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LT16 : AE_LT16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_lt16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT32_X24 pattern> + : XtensaAEInst24<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{13} = br2{0}; +let Inst{14} = br2{1}; +let Inst{15} = 
br2{2}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LT32 : AE_LT32_X24<[(set BR2:$br2, (int_xtensa_ae_lt32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT64_X24 pattern> + : XtensaAEInst24<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LT64 : AE_LT64_X24<[(set BR:$br, (int_xtensa_ae_lt64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAX32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_max32 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmpp_v{0}; +let Inst{13} = ae_cmpp_v{1}; +let Inst{14} = ae_cmpp_v{2}; +let Inst{15} = ae_cmpp_v{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_MAX32 : AE_MAX32_X24<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_max32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAX64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_max64 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAX64 : AE_MAX64_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_max64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAXABS32S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_maxabs32s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let 
Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAXABS32S : AE_MAXABS32S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_maxabs32s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAXABS64S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_maxabs64s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAXABS64S : AE_MAXABS64S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_maxabs64s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MIN32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_min32 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmpp_v{0}; +let Inst{13} = ae_cmpp_v{1}; +let Inst{14} = ae_cmpp_v{2}; +let Inst{15} = ae_cmpp_v{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_MIN32 : AE_MIN32_X24<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_min32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MIN64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_min64 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MIN64 : AE_MIN64_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_min64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class 
AE_MINABS32S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_minabs32s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MINABS32S : AE_MINABS32S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_minabs32s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MINABS64S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_minabs64s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MINABS64S : AE_MINABS64S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_minabs64s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MOV_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_mov $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_MOV : AE_MOV_X24<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_mov AE_DR:$ae_to_dr_v0))]>; + +class AE_MOVAD16_0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.0 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_0 : AE_MOVAD16_0_X24<[(set AR:$arr, (int_xtensa_ae_movad16_0 
AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_1_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.1 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{11} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_dr_to_ar_v0{0}; +let Inst{5} = ae_dr_to_ar_v0{1}; +let Inst{6} = ae_dr_to_ar_v0{2}; +let Inst{7} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_1 : AE_MOVAD16_1_AE_FORMAT48<[(set AR:$arr, (int_xtensa_ae_movad16_1 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_2_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.2 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_2 : AE_MOVAD16_2_X24<[(set AR:$arr, (int_xtensa_ae_movad16_2 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_3_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.3 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_3 : AE_MOVAD16_3_X24<[(set AR:$arr, (int_xtensa_ae_movad16_3 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD32_H_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad32.h $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD32_H : AE_MOVAD32_H_X24<[(set AR:$arr, (int_xtensa_ae_movad32_h AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD32_L_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad32.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let 
Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD32_L : AE_MOVAD32_L_X24<[(set AR:$arr, (int_xtensa_ae_movad32_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVALIGN_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_uu_uu), (ins AE_VALIGN:$ae_uu_v), "ae_movalign $ae_uu_uu, $ae_uu_v", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_uu_uu; +bits<2> ae_uu_v; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{6} = ae_uu_uu{0}; +let Inst{7} = ae_uu_uu{1}; +let Inst{4} = ae_uu_v{0}; +let Inst{5} = ae_uu_v{1}; +} + + + +def AE_MOVALIGN : AE_MOVALIGN_X24<[(set AE_VALIGN:$ae_uu_uu, (int_xtensa_ae_movalign AE_VALIGN:$ae_uu_v))]>; + +class AE_MOVDA16_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_movda16 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_MOVDA16 : AE_MOVDA16_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda16 AR:$ars))]>; + +class AE_MOVDA16X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars, AR:$art), "ae_movda16x2 $ae_ar_to_dr_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_MOVDA16X2 : AE_MOVDA16X2_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda16x2 AR:$ars, AR:$art))]>; + +class AE_MOVDA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_movda32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_MOVDA32 : AE_MOVDA32_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda32 AR:$ars))]>; + +class AE_MOVDA32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars, AR:$art), "ae_movda32x2 $ae_ar_to_dr_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; 
+bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_MOVDA32X2 : AE_MOVDA32X2_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda32x2 AR:$ars, AR:$art))]>; + +class AE_MOVF16X4_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4), "ae_movf16x4 $ae_cmov_v, $ae_cmov_v0, $bt4", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<2> bt4; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{30} = bt4{0}; +let Inst{31} = bt4{1}; +} + + + +def AE_MOVF16X4 : AE_MOVF16X4_AE_FORMAT1<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf16x4 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4))]>; + +class AE_MOVF32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2), "ae_movf32x2 $ae_cmov_v, $ae_cmov_v0, $bt2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<3> bt2; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let Inst{14} = ae_cmov_v{2}; +let Inst{15} = ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{5} = bt2{0}; +let Inst{6} = bt2{1}; +let Inst{7} = bt2{2}; +} + + + +def AE_MOVF32X2 : AE_MOVF32X2_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf32x2 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2))]>; + +class AE_MOVF64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt), "ae_movf64 $ae_cmov_v, $ae_cmov_v0, $bt", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<4> bt; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{62} = 1; +//operands +let Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{28} = bt{0}; +let Inst{29} = bt{1}; +let Inst{30} = bt{2}; +let Inst{31} = bt{3}; +} + + + +def AE_MOVF64 : AE_MOVF64_AE_FORMAT1<[(set 
AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf64 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt))]>; + +class AE_MOVI_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins imm16n_47:$movi_imm), "ae_movi $ae_ar_to_dr_v, $movi_imm", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<6> movi_imm; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{4} = movi_imm{0}; +let Inst{5} = movi_imm{1}; +let Inst{8} = movi_imm{2}; +let Inst{9} = movi_imm{3}; +let Inst{10} = movi_imm{4}; +let Inst{11} = movi_imm{5}; +} + + + +def AE_MOVI : AE_MOVI_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movi timm:$movi_imm))]>; + +class AE_MOVT16X4_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4), "ae_movt16x4 $ae_cmov_v, $ae_cmov_v0, $bt4", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<2> bt4; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{30} = bt4{0}; +let Inst{31} = bt4{1}; +} + + + +def AE_MOVT16X4 : AE_MOVT16X4_AE_FORMAT1<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt16x4 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4))]>; + +class AE_MOVT32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2), "ae_movt32x2 $ae_cmov_v, $ae_cmov_v0, $bt2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<3> bt2; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let Inst{14} = ae_cmov_v{2}; +let Inst{15} = ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{5} = bt2{0}; +let Inst{6} = bt2{1}; +let Inst{7} = bt2{2}; +} + + + +def AE_MOVT32X2 : AE_MOVT32X2_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt32x2 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2))]>; + +class AE_MOVT64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt), "ae_movt64 $ae_cmov_v, $ae_cmov_v0, $bt", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<4> bt; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let 
Inst{14} = ae_cmov_v{2}; +let Inst{15} = ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{4} = bt{0}; +let Inst{5} = bt{1}; +let Inst{6} = bt{2}; +let Inst{7} = bt{3}; +} + + + +def AE_MOVT64 : AE_MOVT64_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt64 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt))]>; + +class AE_MUL16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mul16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MUL16X4 : AE_MUL16X4_AE_FORMAT2<[]>; + +class AE_MUL32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_HH : AE_MUL32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_LH : AE_MUL32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_LL : AE_MUL32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32_LL_S2 : AE_MUL32_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; 
+let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32U_LL : AE_MUL32U_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H0 : AE_MUL32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H0_S2 : AE_MUL32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + 
+//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H1 : AE_MUL32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H1_S2 : AE_MUL32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H2 : AE_MUL32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; 
+bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H2_S2 : AE_MUL32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H3 : AE_MUL32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H3_S2 : AE_MUL32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L0 : AE_MUL32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L0_S2 : AE_MUL32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L1 : AE_MUL32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L1_S2 : AE_MUL32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L2 : AE_MUL32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L2_S2 : AE_MUL32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L3 : AE_MUL32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L3_S2 : AE_MUL32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mula16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULA16X4 : AE_MULA16X4_AE_FORMAT2<[]>; + +class 
AE_MULA32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_HH : AE_MULA32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_LH : AE_MULA32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = 
"$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_LL : AE_MULA32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32_LL_S2 : AE_MULA32_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32U_LL : AE_MULA32U_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H0 : AE_MULA32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H0_S2 : AE_MULA32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H1 : AE_MULA32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H1_S2 : AE_MULA32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H2 : AE_MULA32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H2_S2 : AE_MULA32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H3 : AE_MULA32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H3_S2 : AE_MULA32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L0 : AE_MULA32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L0_S2 : AE_MULA32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULA32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L1 : AE_MULA32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L1_S2 : AE_MULA32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L2 : AE_MULA32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L2_S2 : AE_MULA32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L3 : AE_MULA32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L3_S2 : AE_MULA32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAD24_HH_LL : AE_MULAAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaad24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD24_HH_LL_S2 : AE_MULAAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, 
(int_xtensa_ae_mulaad24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaad24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAD24_HL_LH : AE_MULAAD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaad24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD24_HL_LH_S2 : AE_MULAAD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = 
opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H0_L1 : AE_MULAAD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H0_L1_S2 : AE_MULAAD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h0_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H1_L0 : AE_MULAAD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H1_L0_S2 : AE_MULAAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H2_L3 : AE_MULAAD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = 
ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H2_L3_S2 : AE_MULAAD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h2_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H3_L2 : AE_MULAAD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H3_L2_S2 : AE_MULAAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_11_00 : AE_MULAAFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_11_00_S2 : AE_MULAAFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_11_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = 
opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_13_02 : AE_MULAAFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_13_02_S2 : AE_MULAAFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_13_02_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_33_22 : AE_MULAAFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_33_22_S2 : AE_MULAAFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_33_22_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAFD24_HH_LL : AE_MULAAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaafd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD24_HH_LL_S2 : AE_MULAAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd24_hh_ll_s2 
AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaafd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAFD24_HL_LH : AE_MULAAFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaafd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD24_HL_LH_S2 : AE_MULAAFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = 
opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H0_L1 : AE_MULAAFD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H0_L1_S2 : AE_MULAAFD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h0_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H1_L0 : AE_MULAAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H1_L0_S2 : AE_MULAAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H2_L3 : AE_MULAAFD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let 
Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H2_L3_S2 : AE_MULAAFD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h2_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H3_L2 : AE_MULAAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H3_L2_S2 : AE_MULAAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAC24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulac24 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + 
+//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAC24 : AE_MULAC24_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulac24 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAC32X16_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulac32x16.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAC32X16_H : AE_MULAC32X16_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulac32x16_h AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAC32X16_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulac32x16.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAC32X16_L : 
AE_MULAC32X16_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulac32x16_l AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF16SS_00 : AE_MULAF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF16SS_00_S2 : AE_MULAF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf16ss_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_10 : AE_MULAF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_10 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_11 : AE_MULAF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_11 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_20 : AE_MULAF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_20 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_21 : AE_MULAF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_21 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_22 : AE_MULAF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_22 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_30 : AE_MULAF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_30 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_31_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let 
Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_31 : AE_MULAF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_31 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_32 : AE_MULAF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_32 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_33 : AE_MULAF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_33 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulaf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let 
Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULAF16X4SS : AE_MULAF16X4SS_AE_FORMAT2<[]>; + +class AE_MULAF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_HH : AE_MULAF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32r_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_LH : AE_MULAF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, 
(int_xtensa_ae_mulaf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_LL : AE_MULAF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32R_LL_S2 : AE_MULAF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32r_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_HH : AE_MULAF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_LH : AE_MULAF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = 
opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_LL : AE_MULAF32S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32S_LL_S2 : AE_MULAF32S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H0 : AE_MULAF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> 
ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H0_S2 : AE_MULAF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H1 : AE_MULAF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def 
AE_MULAF32X16_H1_S2 : AE_MULAF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H2 : AE_MULAF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H2_S2 : AE_MULAF32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber 
$opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H3 : AE_MULAF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H3_S2 : AE_MULAF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L0 : AE_MULAF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L0_S2 : AE_MULAF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L1 : AE_MULAF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber 
$ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L1_S2 : AE_MULAF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L2 : AE_MULAF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L2_S2 : AE_MULAF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, 
(int_xtensa_ae_mulaf32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L3 : AE_MULAF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L3_S2 : AE_MULAF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + 
+//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF48Q32SP16S_L : AE_MULAF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF48Q32SP16S_L_S2 : AE_MULAF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF48Q32SP16U_L : AE_MULAF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF48Q32SP16U_L_S2 : AE_MULAF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFC24RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc24ra $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC24RA : AE_MULAFC24RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc24ra AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFC32X16RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc32x16ras.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = 
$opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC32X16RAS_H : AE_MULAFC32X16RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc32x16ras_h AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFC32X16RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc32x16ras.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC32X16RAS_L : AE_MULAFC32X16RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc32x16ras_l AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFD24X2_FIR_H_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd24x2.fir.h $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode + +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = 
ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD24X2_FIR_H : AE_MULAFD24X2_FIR_H_AE_FORMAT2<[]>; + +class AE_MULAFD24X2_FIR_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd24x2.fir.l $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD24X2_FIR_L : AE_MULAFD24X2_FIR_L_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_HH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.hh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_HH : AE_MULAFD32X16X2_FIR_HH_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_HL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.hl $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> 
ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_HL : AE_MULAFD32X16X2_FIR_HL_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_LH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.lh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_LH : AE_MULAFD32X16X2_FIR_LH_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_LL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.ll $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let 
Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_LL : AE_MULAFD32X16X2_FIR_LL_AE_FORMAT2<[]>; + +class AE_MULAFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP24X2R : AE_MULAFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp24x2r_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP24X2R_S2 : AE_MULAFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp24x2r_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP24X2RA : AE_MULAFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP24X2RA_S2 : AE_MULAFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp24x2ra_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; 
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_H : AE_MULAFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_H_S2 : AE_MULAFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_L : AE_MULAFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2ras.l_s2 $ae_mul_S2_q0, 
$ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_L_S2 : AE_MULAFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_H : AE_MULAFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; 
+let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_H_S2 : AE_MULAFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_L : AE_MULAFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_L_S2 : AE_MULAFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X2RAS : AE_MULAFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X2RS : AE_MULAFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = 
ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFQ32SP24S_H_S2 : AE_MULAFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFQ32SP24S_L_S2 : AE_MULAFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulap24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAP24X2 : AE_MULAP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulap24x2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulap24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAP24X2_S2 : AE_MULAP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulap24x2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X16X2_H : AE_MULAP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X16X2_L : AE_MULAP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x16x2_l 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X2 : AE_MULAP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAQ32SP16S_L_S2 : AE_MULAQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaq32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let 
Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAQ32SP16U_L_S2 : AE_MULAQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaq32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULARFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mularfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULARFQ32SP24S_H_S2 : AE_MULARFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mularfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULARFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mularfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULARFQ32SP24S_L_S2 : AE_MULARFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mularfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl 
+bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_HH : AE_MULAS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_HH_S2 : AE_MULAS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_hh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = 
opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_LH : AE_MULAS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_LH_S2 : AE_MULAS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_LL : AE_MULAS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.ll_s2 
$ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_LL_S2 : AE_MULAS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASD24_HH_LL : AE_MULASD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD24_HH_LL_S2 : AE_MULASD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins 
AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASD24_HL_LH : AE_MULASD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD24_HL_LH_S2 : AE_MULASD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = 
opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASD32X16_H1_L0 : AE_MULASD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD32X16_H1_L0_S2 : AE_MULASD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASD32X16_H3_L2 : AE_MULASD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> 
ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD32X16_H3_L2_S2 : AE_MULASD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASFD24_HH_LL : AE_MULASFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasfd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD24_HH_LL_S2 : AE_MULASFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", 
pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASFD24_HL_LH : AE_MULASFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasfd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD24_HL_LH_S2 : AE_MULASFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASFD32X16_H1_L0 : AE_MULASFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD32X16_H1_L0_S2 : AE_MULASFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASFD32X16_H3_L2 : AE_MULASFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = 
$ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD32X16_H3_L2_S2 : AE_MULASFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULC24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulc24 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULC24 : AE_MULC24_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulc24 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULC32X16_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulc32x16.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULC32X16_H : AE_MULC32X16_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulc32x16_h AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULC32X16_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulc32x16.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULC32X16_L : AE_MULC32X16_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulc32x16_l AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF16SS_00 : AE_MULF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF16SS_00_S2 : AE_MULF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf16ss_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs 
AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_10 : AE_MULF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_10 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_11 : AE_MULF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_11 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_20 : AE_MULF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_20 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_21 : AE_MULF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_21 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_22 : AE_MULF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_22 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_30 : AE_MULF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_30 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_31_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = 
ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_31 : AE_MULF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_31 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_32 : AE_MULF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_32 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_33 : AE_MULF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_33 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULF16X4SS : AE_MULF16X4SS_AE_FORMAT2<[]>; + +class AE_MULF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_HH : AE_MULF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_LH : AE_MULF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = 
opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_LL : AE_MULF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32R_LL_S2 : AE_MULF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32r_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_HH : AE_MULF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; 
+let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_LH : AE_MULF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_LL : AE_MULF32S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32S_LL_S2 : AE_MULF32S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32s_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h0 
$opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H0 : AE_MULF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H0_S2 : AE_MULF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H1 : AE_MULF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
(int_xtensa_ae_mulf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H1_S2 : AE_MULF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H2 : AE_MULF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H2_S2 : AE_MULF32X16_H2_S2_AE_FORMAT<[(set 
AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H3 : AE_MULF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H3_S2 : AE_MULF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = 
opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L0 : AE_MULF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_L0_S2 : AE_MULF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L1 : AE_MULF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; 
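+// Operand fields in this 64-bit *_S2 mul encoding: q0 sits in Inst{20-23},
+// d0 in Inst{36-39}, d1 in Inst{28-31}; the same layout recurs in the other
+// *_S2_AE_FORMAT classes in this file.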
+let Inst{23} = ae_mul_S2_q0{3};
+let Inst{36} = ae_mul_S2_d0{0};
+let Inst{37} = ae_mul_S2_d0{1};
+let Inst{38} = ae_mul_S2_d0{2};
+let Inst{39} = ae_mul_S2_d0{3};
+let Inst{28} = ae_mul_S2_d1{0};
+let Inst{29} = ae_mul_S2_d1{1};
+let Inst{30} = ae_mul_S2_d1{2};
+let Inst{31} = ae_mul_S2_d1{3};
+}
+
+
+
+def AE_MULF32X16_L1_S2 : AE_MULF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>;
+
+class AE_MULF32X16_L2_AE_FORMAT48_3<list<dag> pattern>
+  : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>,
+    Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> opnd_ae_sem_mul_x2_S1_q0;
+bits<4> opnd_ae_sem_mul_x2_S1_d0;
+bits<4> opnd_ae_sem_mul_x2_S1_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{40} = 1;
+let Inst{41} = 1;
+let Inst{44} = 1;
+let Inst{45} = 1;
+//operands
+let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0};
+let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1};
+let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2};
+let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3};
+let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0};
+let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1};
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2};
+let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3};
+let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0};
+let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1};
+let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2};
+let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3};
+}
+
+
+
+def AE_MULF32X16_L2 : AE_MULF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>;
+
+class AE_MULF32X16_L2_S2_AE_FORMAT<list<dag> pattern>
+  : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>,
+    Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_mul_S2_q0;
+bits<4> ae_mul_S2_d0;
+bits<4> ae_mul_S2_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{56} = 1;
+let Inst{58} = 1;
+let Inst{60} = 1;
+let Inst{61} = 1;
+let Inst{62} = 1;
+//operands
+let Inst{20} = ae_mul_S2_q0{0};
+let Inst{21} = ae_mul_S2_q0{1};
+let Inst{22} = ae_mul_S2_q0{2};
+let Inst{23} = ae_mul_S2_q0{3};
+let Inst{36} = ae_mul_S2_d0{0};
+let Inst{37} = ae_mul_S2_d0{1};
+let Inst{38} = ae_mul_S2_d0{2};
+let Inst{39} = ae_mul_S2_d0{3};
+let Inst{28} = ae_mul_S2_d1{0};
+let Inst{29} = ae_mul_S2_d1{1};
+let Inst{30} = ae_mul_S2_d1{2};
+let Inst{31} = ae_mul_S2_d1{3};
+}
+
+
+
+def AE_MULF32X16_L2_S2 : AE_MULF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>;
+
+class AE_MULF32X16_L3_AE_FORMAT48_3<list<dag> pattern>
+  : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>,
+    Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> opnd_ae_sem_mul_x2_S1_q0;
+bits<4> opnd_ae_sem_mul_x2_S1_d0;
+bits<4> opnd_ae_sem_mul_x2_S1_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{39} = 1;
+let Inst{40} = 1;
+let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L3 : AE_MULF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_L3_S2 : AE_MULF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF48Q32SP16S_L : AE_MULF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF48Q32SP16S_L_S2 : AE_MULF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF48Q32SP16U_L : AE_MULF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF48Q32SP16U_L_S2 : AE_MULF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFC24RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc24ra $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC24RA : AE_MULFC24RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc24ra AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFC32X16RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc32x16ras.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC32X16RAS_H : AE_MULFC32X16RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc32x16ras_h AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFC32X16RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc32x16ras.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; 
+let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC32X16RAS_L : AE_MULFC32X16RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc32x16ras_l AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFD24X2_FIR_H_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd24x2.fir.h $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD24X2_FIR_H : AE_MULFD24X2_FIR_H_AE_FORMAT2<[]>; + +class AE_MULFD24X2_FIR_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd24x2.fir.l $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD24X2_FIR_L : AE_MULFD24X2_FIR_L_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_HH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.hh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = 
ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_HH : AE_MULFD32X16X2_FIR_HH_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_HL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.hl $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_HL : AE_MULFD32X16X2_FIR_HL_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_LH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.lh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_LH : AE_MULFD32X16X2_FIR_LH_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_LL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.ll $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_LL : AE_MULFD32X16X2_FIR_LL_AE_FORMAT2<[]>; + +class AE_MULFP16X4RAS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulfp16x4ras $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULFP16X4RAS : AE_MULFP16X4RAS_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulfp16x4ras AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULFP16X4S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulfp16x4s $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULFP16X4S : AE_MULFP16X4S_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulfp16x4s AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands 
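+// In this 48-bit AE mul format the register operands are encoded as
+// q0 -> Inst{16-19}, d0 -> Inst{20-23}, d1 -> Inst{24-27}, matching the
+// bit assignments that follow.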
+let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP24X2R : AE_MULFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp24x2r_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP24X2R_S2 : AE_MULFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp24x2r_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP24X2RA : AE_MULFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> 
ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP24X2RA_S2 : AE_MULFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp24x2ra_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_H : AE_MULFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_H_S2 : AE_MULFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_L : AE_MULFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2ras.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_L_S2 : AE_MULFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RS_H : AE_MULFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RS_H_S2 : AE_MULFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RS_L : AE_MULFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let 
Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RS_L_S2 : AE_MULFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X2RAS : AE_MULFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X2RS : AE_MULFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; 
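+// Each operand is a 4-bit index into the AE_DR register file: q0 receives
+// the result, d0 and d1 supply the two source operands.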
+bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFQ32SP24S_H_S2 : AE_MULFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfq32sp24s_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFQ32SP24S_L_S2 : AE_MULFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfq32sp24s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulp24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULP24X2 : AE_MULP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulp24x2 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulp24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let 
Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULP24X2_S2 : AE_MULP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulp24x2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X16X2_H : AE_MULP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X16X2_L : AE_MULP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x16x2_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X2 : AE_MULP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULQ32SP16S_L_S2 : AE_MULQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulq32sp16s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULQ32SP16U_L_S2 : AE_MULQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulq32sp16u_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULRFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulrfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; 
+bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULRFQ32SP24S_H_S2 : AE_MULRFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulrfq32sp24s_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULRFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulrfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULRFQ32SP24S_L_S2 : AE_MULRFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulrfq32sp24s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_muls16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULS16X4 : AE_MULS16X4_AE_FORMAT2<[]>; + +class AE_MULS32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_HH : AE_MULS32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_LH : AE_MULS32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let 
Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_LL : AE_MULS32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_HH : AE_MULS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_muls32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_HH_S2 : AE_MULS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_hh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_LH : AE_MULS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_muls32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_LH_S2 : AE_MULS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_LL : AE_MULS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
(int_xtensa_ae_muls32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_LL_S2 : AE_MULS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32U_LL : AE_MULS32U_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; 
+let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H0 : AE_MULS32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H0_S2 : AE_MULS32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H1 : AE_MULS32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, 
(int_xtensa_ae_muls32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H1_S2 : AE_MULS32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H2 : AE_MULS32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode 
+let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H2_S2 : AE_MULS32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H3 : AE_MULS32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H3_S2 : AE_MULS32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L0 : AE_MULS32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L0_S2 : AE_MULS32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; 
+let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L1 : AE_MULS32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L1_S2 : AE_MULS32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L2 : AE_MULS32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins 
AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L2_S2 : AE_MULS32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L3 : AE_MULS32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let 
Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L3_S2 : AE_MULS32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD24_HH_LL : AE_MULSAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD24_HH_LL_S2 : AE_MULSAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD32X16_H1_L0_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad32x16.h1.l0 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let 
Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD32X16_H1_L0 : AE_MULSAD32X16_H1_L0_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad32x16_h1_l0 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD32X16_H1_L0_S2 : AE_MULSAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD32X16_H3_L2_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad32x16.h3.l2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD32X16_H3_L2 : AE_MULSAD32X16_H3_L2_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad32x16_h3_l2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = 
ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD32X16_H3_L2_S2 : AE_MULSAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAFD24_HH_LL : AE_MULSAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsafd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD24_HH_LL_S2 : AE_MULSAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = 
"HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSAFD32X16_H1_L0 : AE_MULSAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD32X16_H1_L0_S2 : AE_MULSAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSAFD32X16_H3_L2 : AE_MULSAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD32X16_H3_L2_S2 : AE_MULSAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF16SS_00 : AE_MULSF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber 
$ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF16SS_00_S2 : AE_MULSF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf16ss_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_10 : AE_MULSF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_10 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_11 : AE_MULSF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_11 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; 
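+// The AE_MULS* records here all follow the same shape: three 4-bit AE_DR
+// register operands (four for the X4 variants), an accumulator q0 that is
+// read-modify-write, expressed by tying the q0 input to the q0 output with
+// @earlyclobber on the output, the "HIFI3" decoder namespace, and a fixed
+// assignment of format, opcode and operand bits into Inst{} below.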
+let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_20 : AE_MULSF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_20 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_21 : AE_MULSF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_21 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_22 : AE_MULSF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_22 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let 
Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_30 : AE_MULSF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_30 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_31_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_31 : AE_MULSF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_31 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_32 : AE_MULSF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_32 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 
1; +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_33 : AE_MULSF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_33 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulsf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULSF16X4SS : AE_MULSF16X4SS_AE_FORMAT2<[]>; + +class AE_MULSF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_HH : AE_MULSF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_LH : AE_MULSF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_LL : AE_MULSF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} 
= 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32R_LL_S2 : AE_MULSF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32r_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_HH : AE_MULSF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_LH : AE_MULSF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_LL : AE_MULSF32S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H0 : AE_MULSF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), 
"ae_mulsf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H0_S2 : AE_MULSF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H1 : AE_MULSF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let 
Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H1_S2 : AE_MULSF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H2 : AE_MULSF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H2_S2 : AE_MULSF32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ 
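+// The multiply ops in this block mostly come in pairs: a 48-bit encoding
+// (the AE_FORMAT48_3 classes, derived from XtensaInst48, with q0/d0/d1
+// packed into bits 16-27) and a 64-bit encoding (the _S2 and AE_FORMAT2
+// classes, derived from XtensaInst64, with the operand fields and opcode
+// bits spread across the wider word). In each class the bits listed under
+// "//format" select the instruction format and the bits under "//opcode"
+// identify the particular operation; presumably these correspond to the
+// 48-bit and 64-bit HiFi slot formats.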
+//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H3 : AE_MULSF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H3_S2 : AE_MULSF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = 
opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L0 : AE_MULSF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L0_S2 : AE_MULSF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L1 : AE_MULSF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, 
$ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L1_S2 : AE_MULSF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L2 : AE_MULSF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; 
+let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L2_S2 : AE_MULSF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L3 : AE_MULSF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L3_S2 : AE_MULSF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF48Q32SP16S_L : AE_MULSF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF48Q32SP16S_L_S2 : AE_MULSF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; 
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF48Q32SP16U_L : AE_MULSF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF48Q32SP16U_L_S2 : AE_MULSF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP24X2R : AE_MULSFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp24x2r_s2 $ae_mul_S2_q0, 
$ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP24X2R_S2 : AE_MULSFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp24x2r_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP24X2RA : AE_MULSFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; 
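+// Each def instantiates its class with a selection pattern that matches the
+// corresponding int_xtensa_ae_* intrinsic (int_xtensa_ae_mulsfp24x2ra_s2 for
+// this record), so the intrinsic lowers directly to this encoding;
+// presumably the same operation is exposed to C as
+// __builtin_xtensa_ae_mulsfp24x2ra_s2 through the HIFI builtin definitions
+// in this patch, with the operand types as declared there. A few records,
+// such as AE_MULSF16X4SS, are instantiated with an empty pattern list, i.e.
+// they get an encoding but no ISel pattern here.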
+let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP24X2RA_S2 : AE_MULSFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp24x2ra_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_H : AE_MULSFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_H_S2 : AE_MULSFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_L : AE_MULSFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2ras.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_L_S2 : AE_MULSFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = 
opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_H : AE_MULSFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_H_S2 : AE_MULSFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_L : AE_MULSFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_L_S2 : AE_MULSFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X2RAS : AE_MULSFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = 
opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X2RS : AE_MULSFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFQ32SP24S_H_S2 : AE_MULSFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFQ32SP24S_L_S2 : AE_MULSFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsp24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let 
Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSP24X2 : AE_MULSP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsp24x2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsp24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSP24X2_S2 : AE_MULSP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsp24x2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X16X2_H : AE_MULSP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X16X2_L : AE_MULSP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x16x2_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X2 : AE_MULSP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> 
ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSQ32SP16S_L_S2 : AE_MULSQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsq32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSQ32SP16U_L_S2 : AE_MULSQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsq32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSRFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsrfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSRFQ32SP24S_H_S2 : AE_MULSRFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsrfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSRFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs 
AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsrfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSRFQ32SP24S_L_S2 : AE_MULSRFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsrfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulss32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_HH : AE_MULSS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_HH_S2 : AE_MULSS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_hh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulss32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_LH : AE_MULSS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_LH_S2 : AE_MULSS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), 
(ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulss32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_LL : AE_MULSS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_LL_S2 : AE_MULSS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let 
Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSD24_HH_LL : AE_MULSSD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD24_HH_LL_S2 : AE_MULSSD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSD24_HL_LH : AE_MULSSD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD24_HL_LH_S2 : AE_MULSSD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSD32X16_H1_L0 : AE_MULSSD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD32X16_H1_L0_S2 : AE_MULSSD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSD32X16_H3_L2 : AE_MULSSD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD32X16_H3_L2_S2 : AE_MULSSD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 
1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_11_00 : AE_MULSSFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_11_00_S2 : AE_MULSSFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_11_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_13_02 : AE_MULSSFD16SS_13_02_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_13_02_S2 : AE_MULSSFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_13_02_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_33_22 : AE_MULSSFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format 
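+// Format bits: Inst{0-3} below are all ones for this 64-bit AE slot; the scattered high bits under //opcode (Inst{56} and up) then select ae_mulssfd16ss.33_22_s2 within the slot.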
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_33_22_S2 : AE_MULSSFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_33_22_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSFD24_HH_LL : AE_MULSSFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssfd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD24_HH_LL_S2 : AE_MULSSFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; 
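+// ae_mul_q0, ae_mul_d0 and ae_mul_d1 are the 4-bit AE_DR register numbers; they are placed into the encoding under //operands below.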
+bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSFD24_HL_LH : AE_MULSSFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssfd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD24_HL_LH_S2 : AE_MULSSFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD32X16_H1_L0 : AE_MULSSFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD32X16_H1_L0_S2 : AE_MULSSFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD32X16_H3_L2 : AE_MULSSFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> 
ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD32X16_H3_L2_S2 : AE_MULSSFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAD24_HH_LL : AE_MULZAAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaad24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD24_HH_LL_S2 : AE_MULZAAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaad24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format 
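+// The format bits below follow the same pattern for every record in this block:
+// the XtensaInst64-based classes set Inst{0-3}, the XtensaInst48-based
+// *_AE_FORMAT48_* classes set only Inst{1-3}; the variants then differ in their
+// //opcode bits and in which 4-bit fields carry the q0/d0/d1 register numbers.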
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAD24_HL_LH : AE_MULZAAD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaad24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD24_HL_LH_S2 : AE_MULZAAD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H0_L1 : AE_MULZAAD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h0.l1_s2 
$ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H0_L1_S2 : AE_MULZAAD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h0_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H1_L0 : AE_MULZAAD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H1_L0_S2 : AE_MULZAAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULZAAD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H2_L3 : AE_MULZAAD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H2_L3_S2 : AE_MULZAAD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h2_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; 
+let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H3_L2 : AE_MULZAAD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H3_L2_S2 : AE_MULZAAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_11_00 : AE_MULZAAFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = 
ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD16SS_11_00_S2 : AE_MULZAAFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_11_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_13_02 : AE_MULZAAFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD16SS_13_02_S2 : AE_MULZAAFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_13_02_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} 
= 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_33_22 : AE_MULZAAFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD16SS_33_22_S2 : AE_MULZAAFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_33_22_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAFD24_HH_LL : AE_MULZAAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaafd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 
1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD24_HH_LL_S2 : AE_MULZAAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaafd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAFD24_HL_LH : AE_MULZAAFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaafd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD24_HL_LH_S2 : AE_MULZAAFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H0_L1 : AE_MULZAAFD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H0_L1_S2 : AE_MULZAAFD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h0_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H1_L0 : AE_MULZAAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H1_L0_S2 : AE_MULZAAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H2_L3 : AE_MULZAAFD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H2_L3_S2 : AE_MULZAAFD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h2_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h3.l2 
$opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H3_L2 : AE_MULZAAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H3_L2_S2 : AE_MULZAAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASD24_HH_LL : AE_MULZASD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulzasd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD24_HH_LL_S2 : AE_MULZASD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASD24_HL_LH : AE_MULZASD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD24_HL_LH_S2 : AE_MULZASD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ 
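+// As in the other AE_MULZ* records, the q0 operand is defined only in (outs)
+// and has no tied-operand constraint, unlike the accumulating AE_MULSSFD*
+// classes above, which tie *_q0 to *_q0_out and mark the output @earlyclobber.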
+//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASD32X16_H1_L0 : AE_MULZASD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD32X16_H1_L0_S2 : AE_MULZASD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASD32X16_H3_L2 : AE_MULZASD32X16_H3_L2_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD32X16_H3_L2_S2 : AE_MULZASD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASFD24_HH_LL : AE_MULZASFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasfd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD24_HH_LL_S2 : AE_MULZASFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, 
AE_DR:$ae_mul_d1), "ae_mulzasfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASFD24_HL_LH : AE_MULZASFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasfd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD24_HL_LH_S2 : AE_MULZASFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASFD32X16_H1_L0 : AE_MULZASFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASFD32X16_H1_L0_S2_AE_FORMAT 
pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD32X16_H1_L0_S2 : AE_MULZASFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASFD32X16_H3_L2 : AE_MULZASFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD32X16_H3_L2_S2 : 
AE_MULZASFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD24_HH_LL : AE_MULZSAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD24_HH_LL_S2 : AE_MULZSAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAD32X16_H1_L0_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad32x16.h1.l0 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD32X16_H1_L0 : AE_MULZSAD32X16_H1_L0_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad32x16_h1_l0 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD32X16_H1_L0_S2_AE_FORMAT pattern> + : 
XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD32X16_H1_L0_S2 : AE_MULZSAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAD32X16_H3_L2_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad32x16.h3.l2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD32X16_H3_L2 : AE_MULZSAD32X16_H3_L2_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad32x16_h3_l2 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD32X16_H3_L2_S2 : AE_MULZSAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, 
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_mul_q0;
+bits<4> ae_mul_d0;
+bits<4> ae_mul_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{49} = 1;
+let Inst{50} = 1;
+let Inst{52} = 1;
+let Inst{54} = 1;
+let Inst{55} = 1;
+//operands
+let Inst{16} = ae_mul_q0{0};
+let Inst{17} = ae_mul_q0{1};
+let Inst{18} = ae_mul_q0{2};
+let Inst{19} = ae_mul_q0{3};
+let Inst{24} = ae_mul_d0{0};
+let Inst{25} = ae_mul_d0{1};
+let Inst{26} = ae_mul_d0{2};
+let Inst{27} = ae_mul_d0{3};
+let Inst{32} = ae_mul_d1{0};
+let Inst{33} = ae_mul_d1{1};
+let Inst{34} = ae_mul_d1{2};
+let Inst{35} = ae_mul_d1{3};
+}
+
+
+
+def AE_MULZSAFD24_HH_LL : AE_MULZSAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsafd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>;
+
+class AE_MULZSAFD24_HH_LL_S2_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_mul_S2_q0;
+bits<4> ae_mul_S2_d0;
+bits<4> ae_mul_S2_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{59} = 1;
+let Inst{60} = 1;
+let Inst{62} = 1;
+let Inst{63} = 1;
+//operands
+let Inst{20} = ae_mul_S2_q0{0};
+let Inst{21} = ae_mul_S2_q0{1};
+let Inst{22} = ae_mul_S2_q0{2};
+let Inst{23} = ae_mul_S2_q0{3};
+let Inst{36} = ae_mul_S2_d0{0};
+let Inst{37} = ae_mul_S2_d0{1};
+let Inst{38} = ae_mul_S2_d0{2};
+let Inst{39} = ae_mul_S2_d0{3};
+let Inst{28} = ae_mul_S2_d1{0};
+let Inst{29} = ae_mul_S2_d1{1};
+let Inst{30} = ae_mul_S2_d1{2};
+let Inst{31} = ae_mul_S2_d1{3};
+}
+
+
+
+def AE_MULZSAFD24_HH_LL_S2 : AE_MULZSAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>;
+
+class AE_MULZSAFD32X16_H1_L0_AE_FORMAT48_3<list<dag> pattern>
+ : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzsafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> opnd_ae_sem_mul_x2_S1_q0;
+bits<4> opnd_ae_sem_mul_x2_S1_d0;
+bits<4> opnd_ae_sem_mul_x2_S1_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{41} = 1;
+let Inst{42} = 1;
+let Inst{43} = 1;
+let Inst{45} = 1;
+let Inst{46} = 1;
+//operands
+let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0};
+let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1};
+let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2};
+let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3};
+let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0};
+let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1};
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2};
+let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3};
+let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0};
+let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1};
+let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2};
+let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3};
+}
+
+
+
+def AE_MULZSAFD32X16_H1_L0 : AE_MULZSAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzsafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>;
+
+class AE_MULZSAFD32X16_H1_L0_S2_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0,
AE_DR:$ae_mul_S2_d1), "ae_mulzsafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAFD32X16_H1_L0_S2 : AE_MULZSAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzsafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSAFD32X16_H3_L2 : AE_MULZSAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzsafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAFD32X16_H3_L2_S2 : AE_MULZSAFD32X16_H3_L2_S2_AE_FORMAT<[(set 
AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSD24_HH_LL : AE_MULZSSD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD24_HH_LL_S2 : AE_MULZSSD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSD24_HL_LH : AE_MULZSSD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd24.hl.lh_s2 
$ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD24_HL_LH_S2 : AE_MULZSSD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSD32X16_H1_L0 : AE_MULZSSD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD32X16_H1_L0_S2 : AE_MULZSSD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd32x16_h1_l0_s2 
AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSD32X16_H3_L2 : AE_MULZSSD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD32X16_H3_L2_S2 : AE_MULZSSD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; 
+let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1};
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2};
+let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3};
+let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0};
+let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1};
+let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2};
+let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3};
+}
+
+
+
+def AE_MULZSSFD16SS_11_00 : AE_MULZSSFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>;
+
+class AE_MULZSSFD16SS_11_00_S2_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_mul_S2_q0;
+bits<4> ae_mul_S2_d0;
+bits<4> ae_mul_S2_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} = 1;
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{56} = 1;
+let Inst{57} = 1;
+let Inst{58} = 1;
+let Inst{59} = 1;
+let Inst{60} = 1;
+let Inst{62} = 1;
+let Inst{63} = 1;
+//operands
+let Inst{20} = ae_mul_S2_q0{0};
+let Inst{21} = ae_mul_S2_q0{1};
+let Inst{22} = ae_mul_S2_q0{2};
+let Inst{23} = ae_mul_S2_q0{3};
+let Inst{36} = ae_mul_S2_d0{0};
+let Inst{37} = ae_mul_S2_d0{1};
+let Inst{38} = ae_mul_S2_d0{2};
+let Inst{39} = ae_mul_S2_d0{3};
+let Inst{28} = ae_mul_S2_d1{0};
+let Inst{29} = ae_mul_S2_d1{1};
+let Inst{30} = ae_mul_S2_d1{2};
+let Inst{31} = ae_mul_S2_d1{3};
+}
+
+
+
+def AE_MULZSSFD16SS_11_00_S2 : AE_MULZSSFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_11_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>;
+
+class AE_MULZSSFD16SS_13_02_AE_FORMAT48_3<list<dag> pattern>
+ : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> opnd_ae_sem_mul_x2_S1_q0;
+bits<4> opnd_ae_sem_mul_x2_S1_d0;
+bits<4> opnd_ae_sem_mul_x2_S1_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+//opcode
+let Inst{39} = 1;
+let Inst{40} = 1;
+let Inst{44} = 1;
+let Inst{45} = 1;
+let Inst{46} = 1;
+//operands
+let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0};
+let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1};
+let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2};
+let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3};
+let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0};
+let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1};
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2};
+let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3};
+let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0};
+let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1};
+let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2};
+let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3};
+}
+
+
+
+def AE_MULZSSFD16SS_13_02 : AE_MULZSSFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>;
+
+class AE_MULZSSFD16SS_13_02_S2_AE_FORMAT<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_mul_S2_q0;
+bits<4> ae_mul_S2_d0;
+bits<4> ae_mul_S2_d1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{0} =
1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD16SS_13_02_S2 : AE_MULZSSFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_13_02_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD16SS_33_22 : AE_MULZSSFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD16SS_33_22_S2 : AE_MULZSSFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_33_22_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> 
ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSFD24_HH_LL : AE_MULZSSFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssfd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD24_HH_LL_S2 : AE_MULZSSFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSFD24_HL_LH : AE_MULZSSFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssfd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let 
Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD24_HL_LH_S2 : AE_MULZSSFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD32X16_H1_L0 : AE_MULZSSFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD32X16_H1_L0_S2 : AE_MULZSSFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = 
"HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD32X16_H3_L2 : AE_MULZSSFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD32X16_H3_L2_S2 : AE_MULZSSFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_NAND_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_nand $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_dr_to_dr_v{0}; +let Inst{21} = ae_dr_to_dr_v{1}; +let Inst{22} = ae_dr_to_dr_v{2}; +let Inst{23} = ae_dr_to_dr_v{3}; +let Inst{36} = ae_dr_to_dr_v0{0}; +let Inst{37} = ae_dr_to_dr_v0{1}; +let Inst{38} = ae_dr_to_dr_v0{2}; +let Inst{39} = ae_dr_to_dr_v0{3}; +let Inst{28} = ae_dr_to_dr_v1{0}; +let Inst{29} = ae_dr_to_dr_v1{1}; +let Inst{30} = ae_dr_to_dr_v1{2}; +let Inst{31} = ae_dr_to_dr_v1{3}; +} + + + +def AE_NAND : AE_NAND_AE_FORMAT1<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_nand AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_NEG16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg16s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let 
Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG16S : AE_NEG16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg16s AE_DR:$ae_arth_v1))]>; + +class AE_NEG24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg24s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG24S : AE_NEG24S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg24s AE_DR:$ae_arth_v1))]>; + +class AE_NEG32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg32 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{24} = 1; +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG32 : AE_NEG32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg32 AE_DR:$ae_arth_v1))]>; + +class AE_NEG32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg32s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG32S : AE_NEG32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg32s AE_DR:$ae_arth_v1))]>; + +class AE_NEG64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg64 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = 
ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG64 : AE_NEG64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg64 AE_DR:$ae_arth_v1))]>; + +class AE_NEG64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg64s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{26} = 1; +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG64S : AE_NEG64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg64s AE_DR:$ae_arth_v1))]>; + +class AE_NSA64_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsa64 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_NSA64 : AE_NSA64_X24<[(set AR:$arr, (int_xtensa_ae_nsa64 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_NSAZ16_0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsaz16.0 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_NSAZ16_0 : AE_NSAZ16_0_X24<[(set AR:$arr, (int_xtensa_ae_nsaz16_0 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_NSAZ32_L_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsaz32.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_NSAZ32_L : AE_NSAZ32_L_X24<[(set AR:$arr, (int_xtensa_ae_nsaz32_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_OR_X24 pattern> + : XtensaAEInst24<(outs 
AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_or $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_OR : AE_OR_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_or AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_PKSR24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_pks_d_out), (ins AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, uimm2:$ae_imm2), "ae_pksr24 $ae_pks_d, $ae_pks_s, $ae_imm2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_pks_d; +bits<4> ae_pks_s; +bits<2> ae_imm2; +let Constraints = "$ae_pks_d = $ae_pks_d_out,@earlyclobber $ae_pks_d_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_pks_d{0}; +let Inst{13} = ae_pks_d{1}; +let Inst{14} = ae_pks_d{2}; +let Inst{15} = ae_pks_d{3}; +let Inst{4} = ae_pks_s{0}; +let Inst{5} = ae_pks_s{1}; +let Inst{6} = ae_pks_s{2}; +let Inst{7} = ae_pks_s{3}; +let Inst{10} = ae_imm2{0}; +let Inst{11} = ae_imm2{1}; +} + + + +def AE_PKSR24 : AE_PKSR24_X24<[(set AE_DR:$ae_pks_d_out, (int_xtensa_ae_pksr24 AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, timm:$ae_imm2))]>; + +class AE_PKSR32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_pks_d_out), (ins AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, uimm2:$ae_imm2), "ae_pksr32 $ae_pks_d, $ae_pks_s, $ae_imm2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_pks_d; +bits<4> ae_pks_s; +bits<2> ae_imm2; +let Constraints = "$ae_pks_d = $ae_pks_d_out,@earlyclobber $ae_pks_d_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_pks_d{0}; +let Inst{13} = ae_pks_d{1}; +let Inst{14} = ae_pks_d{2}; +let Inst{15} = ae_pks_d{3}; +let Inst{4} = ae_pks_s{0}; +let Inst{5} = ae_pks_s{1}; +let Inst{6} = ae_pks_s{2}; +let Inst{7} = ae_pks_s{3}; +let Inst{10} = ae_imm2{0}; +let Inst{11} = ae_imm2{1}; +} + + + +def AE_PKSR32 : AE_PKSR32_X24<[(set AE_DR:$ae_pks_d_out, (int_xtensa_ae_pksr32 AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, timm:$ae_imm2))]>; + +class AE_ROUND16X4F32SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0), "ae_round16x4f32sasym $ae_arth_v, $ae_arth_v1, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +let 
Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +} + + + +def AE_ROUND16X4F32SASYM : AE_ROUND16X4F32SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round16x4f32sasym AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0))]>; + +class AE_ROUND16X4F32SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0), "ae_round16x4f32ssym $ae_arth_v, $ae_arth_v1, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUND16X4F32SSYM : AE_ROUND16X4F32SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round16x4f32ssym AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0))]>; + +class AE_ROUND24X2F48SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round24x2f48sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND24X2F48SASYM : AE_ROUND24X2F48SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round24x2f48sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND24X2F48SSYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round24x2f48ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND24X2F48SSYM : AE_ROUND24X2F48SSYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round24x2f48ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F48SASYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f48sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> 
ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F48SASYM : AE_ROUND32X2F48SASYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f48sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F48SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f48ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F48SSYM : AE_ROUND32X2F48SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f48ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F64SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f64sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F64SASYM : AE_ROUND32X2F64SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f64sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F64SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f64ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; 
+let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F64SSYM : AE_ROUND32X2F64SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f64ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSP16F24ASYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0), "ae_roundsp16f24asym $ae_arth_v, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUNDSP16F24ASYM : AE_ROUNDSP16F24ASYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16f24asym AE_DR:$ae_arth_v0))]>; + +class AE_ROUNDSP16F24SYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0), "ae_roundsp16f24sym $ae_arth_v, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUNDSP16F24SYM : AE_ROUNDSP16F24SYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16f24sym AE_DR:$ae_arth_v0))]>; + +class AE_ROUNDSP16Q48X2ASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_roundsp16q48x2asym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSP16Q48X2ASYM : AE_ROUNDSP16Q48X2ASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16q48x2asym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSP16Q48X2SYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_roundsp16q48x2sym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = 
ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSP16Q48X2SYM : AE_ROUNDSP16Q48X2SYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16q48x2sym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSQ32F48ASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_roundsq32f48asym $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSQ32F48ASYM : AE_ROUNDSQ32F48ASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsq32f48asym AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSQ32F48SYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_roundsq32f48sym $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSQ32F48SYM : AE_ROUNDSQ32F48SYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsq32f48sym AE_DR:$ae_arth_v1))]>; + +class AE_S16_0_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16.0.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16_0_I : AE_S16_0_I_X24<[(int_xtensa_ae_s16_0_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16)]>; + +class AE_S16_0_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16.0.ip $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; 
+let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16_0_IP : AE_S16_0_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16_0_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16))]>; + +class AE_S16_0_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_X : AE_S16_0_X_AE_FORMAT48<[(int_xtensa_ae_s16_0_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16_0_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_XC : AE_S16_0_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16_0_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16_0_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_XP : AE_S16_0_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16_0_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16M_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16m.l.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let 
Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16M_L_I : AE_S16M_L_I_X24<[(int_xtensa_ae_s16m_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16)]>; + +class AE_S16M_L_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16m.l.iu $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16M_L_IU : AE_S16M_L_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16))]>; + +class AE_S16M_L_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16m.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_X : AE_S16M_L_X_X24<[(int_xtensa_ae_s16m_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16M_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16m.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_XC : AE_S16M_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16M_L_XU_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, 
AR:$ars, AR:$art), "ae_s16m.l.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_XU : AE_S16M_L_XU_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X2M_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s16x2m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S16X2M_I : AE_S16X2M_I_X24<[(int_xtensa_ae_s16x2m_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S16X2M_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s16x2m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S16X2M_IU : AE_S16X2M_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S16X2M_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = 
art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_X : AE_S16X2M_X_X24<[(int_xtensa_ae_s16x2m_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16X2M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_XC : AE_S16X2M_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X2M_XU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_XU : AE_S16X2M_XU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X4_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s16x4.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S16X4_I : AE_S16X4_I_X24<[(int_xtensa_ae_s16x4_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s16x4.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; 
+let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_S16X4_IP : AE_S16X4_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x4_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S16X4_RIC_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s16x4.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S16X4_RIC : AE_S16X4_RIC_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s16x4_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S16X4_RIP_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s16x4.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S16X4_RIP : AE_S16X4_RIP_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s16x4_rip AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S16X4_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_X : AE_S16X4_X_AE_FORMAT48<[(int_xtensa_ae_s16x4_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16X4_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; 
+let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_XC : AE_S16X4_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x4_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X4_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_XP : AE_S16X4_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x4_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S24RA64S_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s24ra64s.i $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S24RA64S_I : AE_S24RA64S_I_AE_FORMAT48<[(int_xtensa_ae_s24ra64s_i AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32)]>; + +class AE_S24RA64S_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s24ra64s.ip $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = 
ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S24RA64S_IP : AE_S24RA64S_IP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_ip AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32))]>; + +class AE_S24RA64S_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.x $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_X : AE_S24RA64S_X_AE_FORMAT48<[(int_xtensa_ae_s24ra64s_x AE_DR:$ae_ls_v1, AR:$ars, AR:$art)]>; + +class AE_S24RA64S_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.xc $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_XC : AE_S24RA64S_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_xc AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S24RA64S_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.xp $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_XP : AE_S24RA64S_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_xp AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S24X2RA64S_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars), "ae_s24x2ra64s.ip $ae_ls_v2, $ae_ls_v1, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v2; +bits<4> ae_ls_v1; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let 
Inst{16} = 1; +let Inst{17} = 1; +//operands +let Inst{4} = ae_ls_v2{0}; +let Inst{5} = ae_ls_v2{1}; +let Inst{6} = ae_ls_v2{2}; +let Inst{7} = ae_ls_v2{3}; +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S24X2RA64S_IP : AE_S24X2RA64S_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s24x2ra64s_ip AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars))]>; + +class AE_S32_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32.l.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32_L_I : AE_S32_L_I_X24<[(int_xtensa_ae_s32_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32_L_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32.l.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32_L_IP : AE_S32_L_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32_l_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32_L_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_X : AE_S32_L_X_X24<[(int_xtensa_ae_s32_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + 
+//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_XC : AE_S32_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32_L_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_XP : AE_S32_L_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32_l_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32F24_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32f24.l.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32F24_L_I : AE_S32F24_L_I_X24<[(int_xtensa_ae_s32f24_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32F24_L_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32f24.l.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32F24_L_IP : AE_S32F24_L_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + 
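For reference, the X24 encodings in this block compose mechanically: the constant Inst{} opcode bits are ORed together and each 4-bit operand slice is shifted into its field. Below is a minimal C sketch for the AE_S32F24_L_IP word defined just above; the encode_ae_s32f24_l_ip helper and the example values are illustrative only and not part of this patch, and treating ae_immls32 as the raw 4-bit field value is an assumption (the imm32n_28 operand presumably scales the byte offset before it reaches the field).

#include <stdint.h>
#include <stdio.h>

/* Compose the 24-bit AE_S32F24_L_IP word from the Inst{} assignments above. */
static uint32_t encode_ae_s32f24_l_ip(unsigned ae_ls_v, unsigned ars,
                                      unsigned ae_immls32_field) {
  uint32_t inst = 0;
  /* opcode bits: Inst{2}, Inst{16..20}, Inst{22}, Inst{23} are constant 1 */
  inst |= 1u << 2;
  inst |= (1u << 16) | (1u << 17) | (1u << 18) | (1u << 19) | (1u << 20);
  inst |= (1u << 22) | (1u << 23);
  /* operand fields */
  inst |= (ae_ls_v & 0xFu) << 12;          /* Inst{12..15} = ae_ls_v{0..3}    */
  inst |= (ars & 0xFu) << 8;               /* Inst{8..11}  = ars{0..3}        */
  inst |= (ae_immls32_field & 0xFu) << 4;  /* Inst{4..7}   = ae_immls32{0..3} */
  return inst & 0xFFFFFFu;                 /* 24-bit X24 word */
}

int main(void) {
  /* e.g. AE register 1, address register a4, raw immediate field 0 */
  printf("%06x\n", encode_ae_s32f24_l_ip(1, 4, 0));
  return 0;
}

The 48-bit AE_FORMAT48 and 64-bit AE_FORMAT1 classes in this file follow the same scheme, only with their format bits (e.g. Inst{1..3}, Inst{46,47}) folded in and a wider word.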
+class AE_S32F24_L_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_X : AE_S32F24_L_X_AE_FORMAT48<[(int_xtensa_ae_s32f24_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32F24_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_XC : AE_S32F24_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32F24_L_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_XP : AE_S32F24_L_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32M_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let 
Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32M_I : AE_S32M_I_X24<[(int_xtensa_ae_s32m_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32M_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32M_IU : AE_S32M_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s32m_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32M_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_X : AE_S32M_X_X24<[(int_xtensa_ae_s32m_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_XC : AE_S32M_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32m_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32M_XU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let 
Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_XU : AE_S32M_XU_X24<[(set AR:$ars_out, (int_xtensa_ae_s32m_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32RA64S_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s32ra64s.i $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32RA64S_I : AE_S32RA64S_I_AE_FORMAT48<[(int_xtensa_ae_s32ra64s_i AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32RA64S_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s32ra64s.ip $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32RA64S_IP : AE_S32RA64S_IP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32ra64s_ip AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32RA64S_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.x $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_X : AE_S32RA64S_X_AE_FORMAT48<[(int_xtensa_ae_s32ra64s_x AE_DR:$ae_ls_v1, AR:$ars, AR:$art)]>; + +class AE_S32RA64S_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins 
AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.xc $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_XC : AE_S32RA64S_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32ra64s_xc AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S32RA64S_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.xp $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_XP : AE_S32RA64S_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32ra64s_xp AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S32X2_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s32x2.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S32X2_I : AE_S32X2_I_X24<[(int_xtensa_ae_s32x2_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s32x2.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + 
+ +def AE_S32X2_IP : AE_S32X2_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2_RIC : AE_S32X2_RIC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2_RIP_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2_RIP : AE_S32X2_RIP_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s32x2_rip AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_X : AE_S32X2_X_X24<[(int_xtensa_ae_s32x2_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32X2_XC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let 
Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_XC : AE_S32X2_XC_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_XP : AE_S32X2_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2F24_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s32x2f24.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S32X2F24_I : AE_S32X2F24_I_X24<[(int_xtensa_ae_s32x2f24_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s32x2f24.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_S32X2F24_IP : AE_S32X2F24_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2f24.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; 
+//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2F24_RIC : AE_S32X2F24_RIC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2f24.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{7} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2F24_RIP : AE_S32X2F24_RIP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_rip AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2F24_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_X : AE_S32X2F24_X_X24<[(int_xtensa_ae_s32x2f24_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32X2F24_XC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_XC : AE_S32X2F24_XC_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2F24_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let 
Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_XP : AE_S32X2F24_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2RA64S_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars), "ae_s32x2ra64s.ip $ae_ls_v2, $ae_ls_v1, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v2; +bits<4> ae_ls_v1; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +//operands +let Inst{4} = ae_ls_v2{0}; +let Inst{5} = ae_ls_v2{1}; +let Inst{6} = ae_ls_v2{2}; +let Inst{7} = ae_ls_v2{3}; +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2RA64S_IP : AE_S32X2RA64S_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2ra64s_ip AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars))]>; + +class AE_S64_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s64.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S64_I : AE_S64_I_X24<[(int_xtensa_ae_s64_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S64_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s64.ip $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S64_IP : AE_S64_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s64_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64))]>; + +class AE_S64_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_X : AE_S64_X_AE_FORMAT48<[(int_xtensa_ae_s64_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S64_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_XC : AE_S64_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s64_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S64_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_XP : AE_S64_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s64_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_SA16X4_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = 
ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_IC : AE_SA16X4_IC_AE_FORMAT48<[]>; + +class AE_SA16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_IP : AE_SA16X4_IP_X24<[]>; + +class AE_SA16X4_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_RIC : AE_SA16X4_RIC_AE_FORMAT48<[]>; + +class AE_SA16X4_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_RIP : AE_SA16X4_RIP_AE_FORMAT48<[]>; + +class AE_SA24_L_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = 
$ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_IC : AE_SA24_L_IC_AE_FORMAT48<[]>; + +class AE_SA24_L_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_IP : AE_SA24_L_IP_AE_FORMAT48<[]>; + +class AE_SA24_L_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_RIC : AE_SA24_L_RIC_AE_FORMAT48<[]>; + +class AE_SA24_L_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; 
+//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_RIP : AE_SA24_L_RIP_AE_FORMAT48<[]>; + +class AE_SA24X2_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_IC : AE_SA24X2_IC_AE_FORMAT48<[]>; + +class AE_SA24X2_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_IP : AE_SA24X2_IP_AE_FORMAT48<[]>; + +class AE_SA24X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_RIC : AE_SA24X2_RIC_AE_FORMAT48<[]>; + +class AE_SA24X2_RIP_AE_FORMAT48 pattern> + : 
XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_RIP : AE_SA24X2_RIP_AE_FORMAT48<[]>; + +class AE_SA32X2_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_IC : AE_SA32X2_IC_AE_FORMAT48<[]>; + +class AE_SA32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_IP : AE_SA32X2_IP_X24<[]>; + +class AE_SA32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let 
Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_RIC : AE_SA32X2_RIC_AE_FORMAT48<[]>; + +class AE_SA32X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_RIP : AE_SA32X2_RIP_AE_FORMAT48<[]>; + +class AE_SA32X2F24_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_IC : AE_SA32X2F24_IC_AE_FORMAT48<[]>; + +class AE_SA32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_IP : AE_SA32X2F24_IP_X24<[]>; + +class AE_SA32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs 
AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_RIC : AE_SA32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_SA32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_RIP : AE_SA32X2F24_RIP_AE_FORMAT48<[]>; + +class AE_SA64NEG_FP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out), (ins AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa64neg.fp $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{15} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA64NEG_FP : AE_SA64NEG_FP_AE_FORMAT48<[(set AE_VALIGN:$ae_ls_su_out, (int_xtensa_ae_sa64neg_fp AE_VALIGN:$ae_ls_su, AR:$ars))]>; + +class AE_SA64POS_FP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out), (ins AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa64pos.fp $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let 
Inst{22} = 1;
+//operands
+let Inst{4} = ae_ls_su{0};
+let Inst{5} = ae_ls_su{1};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+}
+
+
+
+def AE_SA64POS_FP : AE_SA64POS_FP_X24<[(set AE_VALIGN:$ae_ls_su_out, (int_xtensa_ae_sa64pos_fp AE_VALIGN:$ae_ls_su, AR:$ars))]>;
+
+class AE_SALIGN64_I_AE_FORMAT48<list<dag> pattern>
+ : XtensaInst48<(outs ), (ins AE_VALIGN:$ae_ls_su, AR:$ars, imm64n_56:$ae_immls64), "ae_salign64.i $ae_ls_su, $ars, $ae_immls64", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<2> ae_ls_su;
+bits<4> ars;
+bits<4> ae_immls64;
+
+
+let mayStore = 1; let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{46} = 1;
+let Inst{47} = 1;
+//opcode
+let Inst{14} = 1;
+let Inst{30} = 1;
+let Inst{33} = 1;
+let Inst{34} = 1;
+let Inst{37} = 1;
+//operands
+let Inst{6} = ae_ls_su{0};
+let Inst{7} = ae_ls_su{1};
+let Inst{8} = ars{0};
+let Inst{9} = ars{1};
+let Inst{10} = ars{2};
+let Inst{11} = ars{3};
+let Inst{4} = ae_immls64{0};
+let Inst{5} = ae_immls64{1};
+let Inst{28} = ae_immls64{2};
+let Inst{29} = ae_immls64{3};
+}
+
+
+
+def AE_SALIGN64_I : AE_SALIGN64_I_AE_FORMAT48<[(int_xtensa_ae_salign64_i AE_VALIGN:$ae_ls_su, AR:$ars, timm:$ae_immls64)]>;
+
+class AE_SAT16X4_AE_FORMAT1<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sat16x4 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_arth_v;
+bits<4> ae_arth_v0;
+bits<4> ae_arth_v1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{63} = 1;
+//opcode
+let Inst{56} = 1;
+let Inst{58} = 1;
+let Inst{61} = 1;
+//operands
+let Inst{20} = ae_arth_v{0};
+let Inst{21} = ae_arth_v{1};
+let Inst{22} = ae_arth_v{2};
+let Inst{23} = ae_arth_v{3};
+let Inst{28} = ae_arth_v0{0};
+let Inst{29} = ae_arth_v0{1};
+let Inst{30} = ae_arth_v0{2};
+let Inst{31} = ae_arth_v0{3};
+let Inst{36} = ae_arth_v1{0};
+let Inst{37} = ae_arth_v1{1};
+let Inst{38} = ae_arth_v1{2};
+let Inst{39} = ae_arth_v1{3};
+}
+
+
+
+def AE_SAT16X4 : AE_SAT16X4_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat16x4 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>;
+
+class AE_SAT24S_AE_FORMAT1<list<dag> pattern>
+ : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_sat24s $ae_arth_v, $ae_arth_v1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_arth_v;
+bits<4> ae_arth_v1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+let Inst{1} = 1;
+let Inst{2} = 1;
+let Inst{3} = 1;
+let Inst{63} = 1;
+//opcode
+let Inst{29} = 1;
+let Inst{30} = 1;
+let Inst{31} = 1;
+let Inst{56} = 1;
+let Inst{57} = 1;
+let Inst{58} = 1;
+let Inst{62} = 1;
+//operands
+let Inst{20} = ae_arth_v{0};
+let Inst{21} = ae_arth_v{1};
+let Inst{22} = ae_arth_v{2};
+let Inst{23} = ae_arth_v{3};
+let Inst{36} = ae_arth_v1{0};
+let Inst{37} = ae_arth_v1{1};
+let Inst{38} = ae_arth_v1{2};
+let Inst{39} = ae_arth_v1{3};
+}
+
+
+
+def AE_SAT24S : AE_SAT24S_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat24s AE_DR:$ae_arth_v1))]>;
+
+class AE_SAT48S_X24<list<dag> pattern>
+ : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_sat48s $ae_arth_v, $ae_arth_v1", pattern>,
+ Requires<[HasHIFI3]>
+{
+//operand decl
+bits<4> ae_arth_v;
+bits<4> ae_arth_v1;
+
+
+let DecoderNamespace = "HIFI3";
+
+//format
+
+//opcode
+let Inst{2} = 1;
+let Inst{9} = 1;
+let Inst{11} = 1;
+let Inst{16} = 1;
+let Inst{17} = 1;
+let Inst{19} = 1;
+let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SAT48S : AE_SAT48S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat48s AE_DR:$ae_arth_v1))]>; + +class AE_SATQ56S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_satq56s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_SATQ56S : AE_SATQ56S_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_satq56s AE_DR:$ae_arth_v1))]>; + +class AE_SB_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{13} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB : AE_SB_X24<[(set AR:$ars_out, (int_xtensa_ae_sb AR:$ars, AR:$art))]>; + +class AE_SB_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb.ic $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB_IC : AE_SB_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sb_ic AR:$ars, AR:$art))]>; + +class AE_SB_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb.ip $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB_IP : AE_SB_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_sb_ip AR:$ars, AR:$art))]>; + +class AE_SBF_X24 pattern> + : 
XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF : AE_SBF_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf AR:$ars))]>; + +class AE_SBF_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF_IC : AE_SBF_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf_ic AR:$ars))]>; + +class AE_SBF_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF_IP : AE_SBF_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf_ip AR:$ars))]>; + +class AE_SBI_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI : AE_SBI_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SBI_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi.ic $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let 
Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI_IC : AE_SBI_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi_ic AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SBI_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi.ip $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI_IP : AE_SBI_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi_ip AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SEL16I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, uimm4:$ae_selimm), "ae_sel16i $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1, $ae_selimm", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; +bits<4> ae_selimm; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +let Inst{16} = ae_selimm{0}; +let Inst{17} = ae_selimm{1}; +let Inst{18} = ae_selimm{2}; +let Inst{19} = ae_selimm{3}; +} + + + +def AE_SEL16I : AE_SEL16I_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sel16i AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, timm:$ae_selimm))]>; + +class AE_SEL16I_N_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, uimm2:$ae_selimm_N), "ae_sel16i.n $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1, $ae_selimm_N", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; +bits<2> ae_selimm_N; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode + +//operands +let Inst{16} = ae_dr_to_dr_v{0}; +let Inst{17} = ae_dr_to_dr_v{1}; +let Inst{18} = ae_dr_to_dr_v{2}; +let Inst{19} = ae_dr_to_dr_v{3}; +let Inst{24} = ae_dr_to_dr_v0{0}; +let Inst{25} = ae_dr_to_dr_v0{1}; +let Inst{26} = ae_dr_to_dr_v0{2}; +let Inst{27} = ae_dr_to_dr_v0{3}; +let Inst{32} = ae_dr_to_dr_v1{0}; +let Inst{33} = ae_dr_to_dr_v1{1}; +let Inst{34} = ae_dr_to_dr_v1{2}; +let Inst{35} = ae_dr_to_dr_v1{3}; +let Inst{48} = ae_selimm_N{0}; +let Inst{49} = ae_selimm_N{1}; +} + + + +def AE_SEL16I_N : AE_SEL16I_N_AE_FORMAT<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sel16i_n AE_DR:$ae_dr_to_dr_v0, 
AE_DR:$ae_dr_to_dr_v1, timm:$ae_selimm_N))]>; + +class AE_SEXT32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, imm7_22:$ae_opnd_tp7), "ae_sext32 $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_opnd_tp7", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_opnd_tp7; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_opnd_tp7{0}; +let Inst{5} = ae_opnd_tp7{1}; +let Inst{6} = ae_opnd_tp7{2}; +let Inst{7} = ae_opnd_tp7{3}; +} + + + +def AE_SEXT32 : AE_SEXT32_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sext32 AE_DR:$ae_dr_to_dr_v0, timm:$ae_opnd_tp7))]>; + +class AE_SEXT32X2D16_10_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_sext32x2d16.10 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_SEXT32X2D16_10 : AE_SEXT32X2D16_10_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_sext32x2d16_10 AE_DR:$ae_to_dr_v0))]>; + +class AE_SEXT32X2D16_32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_sext32x2d16.32 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_SEXT32X2D16_32 : AE_SEXT32X2D16_32_X24<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_sext32x2d16_32 AE_DR:$ae_to_dr_v0))]>; + +class AE_SHA32_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars), "ae_sha32 $arr, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SHA32 : AE_SHA32_X24<[(set AR:$arr, (int_xtensa_ae_sha32 AR:$ars))]>; + +class AE_SHORTSWAP_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), 
"ae_shortswap $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_to_dr_v{0}; +let Inst{21} = ae_to_dr_v{1}; +let Inst{22} = ae_to_dr_v{2}; +let Inst{23} = ae_to_dr_v{3}; +let Inst{28} = ae_to_dr_v0{0}; +let Inst{29} = ae_to_dr_v0{1}; +let Inst{30} = ae_to_dr_v0{2}; +let Inst{31} = ae_to_dr_v0{3}; +} + + + +def AE_SHORTSWAP : AE_SHORTSWAP_AE_FORMAT1<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_shortswap AE_DR:$ae_to_dr_v0))]>; + +class AE_SLAA16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa16s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA16S : AE_SLAA16S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa16s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA32 : AE_SLAA32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa32s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA32S : AE_SLAA32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa32s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA64 : AE_SLAA64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA64S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa64s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{15} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA64S : AE_SLAA64S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa64s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAAQ56_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaaq56 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAAQ56 : AE_SLAAQ56_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaaq56 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAI16S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_slai16s $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SLAI16S : AE_SLAI16S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai16s AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + +class AE_SLAI24_X24 pattern> 
+ : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI24 : AE_SLAI24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai24s $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI24S : AE_SLAI24S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai24s AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI32 : AE_SLAI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai32s $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let 
Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI32S : AE_SLAI32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai32s AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slai64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SLAI64 : AE_SLAI64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAI64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slai64s $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa64{0}; +let Inst{37} = ae_osa64{1}; +let Inst{38} = ae_osa64{2}; +let Inst{39} = ae_osa64{3}; +let Inst{56} = ae_osa64{4}; +let Inst{57} = ae_osa64{5}; +} + + + +def AE_SLAI64S : AE_SLAI64S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai64s AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAISQ56S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slaisq56s $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SLAISQ56S : AE_SLAISQ56S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaisq56s AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAS24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; 
+let Inst{9} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLAS24 : AE_SLAS24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas24 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas24s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS24S : AE_SLAS24S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas24s AE_DR:$ae_shift_d0))]>; + +class AE_SLAS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS32 : AE_SLAS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas32 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas32s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS32S : AE_SLAS32S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas32s AE_DR:$ae_shift_d0))]>; + +class AE_SLAS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{38} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let 
Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS64 : AE_SLAS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas64 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS64S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas64s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLAS64S : AE_SLAS64S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas64s AE_DR:$ae_shift_d0))]>; + +class AE_SLASQ56_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slasq56 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLASQ56 : AE_SLASQ56_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slasq56 AE_DR:$ae_shift_d0))]>; + +class AE_SLASSQ56S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slassq56s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLASSQ56S : AE_SLASSQ56S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slassq56s AE_DR:$ae_shift_d0))]>; + +class AE_SRA64_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sra64_32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = 
ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRA64_32 : AE_SRA64_32_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sra64_32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA16RS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa16rs $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA16RS : AE_SRAA16RS_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa16rs AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA16S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa16s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{14} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA16S : AE_SRAA16S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa16s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32 : AE_SRAA32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32RS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32rs $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{14} = 1; +let Inst{15} = 1; 
+let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32RS : AE_SRAA32RS_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32rs AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32S : AE_SRAA32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA64 : AE_SRAA64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAI16_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_srai16 $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SRAI16 : AE_SRAI16_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai16 AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + +class AE_SRAI16R_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_srai16r $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand 
decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SRAI16R : AE_SRAI16R_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai16r AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + +class AE_SRAI24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRAI24 : AE_SRAI24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRAI32 : AE_SRAI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI32R_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai32r $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa32{0}; +let Inst{37} = ae_osa32{1}; +let Inst{38} = ae_osa32{2}; +let Inst{39} = 
ae_osa32{3}; +let Inst{56} = ae_osa32{4}; +} + + + +def AE_SRAI32R : AE_SRAI32R_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai32r AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_srai64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SRAI64 : AE_SRAI64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SRAS24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_sras24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SRAS24 : AE_SRAS24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras24 AE_DR:$ae_shift_d0))]>; + +class AE_SRAS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_sras32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRAS32 : AE_SRAS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras32 AE_DR:$ae_shift_d0))]>; + +class AE_SRAS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_sras64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = 
ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRAS64 : AE_SRAS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras64 AE_DR:$ae_shift_d0))]>; + +class AE_SRLA32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_srla32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRLA32 : AE_SRLA32_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srla32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRLA64_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_srla64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRLA64 : AE_SRLA64_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srla64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRLI24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srli24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa32{0}; +let Inst{37} = ae_osa32{1}; +let Inst{38} = ae_osa32{2}; +let Inst{39} = ae_osa32{3}; +let Inst{56} = ae_osa32{4}; +} + + + +def AE_SRLI24 : AE_SRLI24_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRLI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srli32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands 
+let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRLI32 : AE_SRLI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRLI64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_srli64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa64{0}; +let Inst{37} = ae_osa64{1}; +let Inst{38} = ae_osa64{2}; +let Inst{39} = ae_osa64{3}; +let Inst{56} = ae_osa64{4}; +let Inst{57} = ae_osa64{5}; +} + + + +def AE_SRLI64 : AE_SRLI64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SRLS24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRLS24 : AE_SRLS24_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls24 AE_DR:$ae_shift_d0))]>; + +class AE_SRLS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRLS32 : AE_SRLS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls32 AE_DR:$ae_shift_d0))]>; + +class AE_SRLS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> 
ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRLS64 : AE_SRLS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls64 AE_DR:$ae_shift_d0))]>; + +class AE_SUB16_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub16 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB16 : AE_SUB16_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub16 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub16s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB16S : AE_SUB16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub16s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub24s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let 
Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB24S : AE_SUB24S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub24s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB32 : AE_SUB32_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB32S : AE_SUB32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub64 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB64 : AE_SUB64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub64 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub64s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let 
Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB64S : AE_SUB64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub64s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUBADD32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_subadd32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUBADD32 : AE_SUBADD32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_subadd32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUBADD32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_subadd32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUBADD32S : AE_SUBADD32S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_subadd32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_TRUNCA32F64S_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars), "ae_trunca32f64s.l $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; +let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = 
ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_TRUNCA32F64S_L : AE_TRUNCA32F64S_L_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunca32f64s_l AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars))]>; + +class AE_TRUNCA32X2F64S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars), "ae_trunca32x2f64s $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{16} = ae_shift_sd{0}; +let Inst{17} = ae_shift_sd{1}; +let Inst{18} = ae_shift_sd{2}; +let Inst{19} = ae_shift_sd{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_TRUNCA32X2F64S : AE_TRUNCA32X2F64S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunca32x2f64s AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars))]>; + +class AE_TRUNCI32F64S_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, uimm4:$ae_osa16), "ae_trunci32f64s.l $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; +let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ae_osa16{0}; +let Inst{9} = ae_osa16{1}; +let Inst{10} = ae_osa16{2}; +let Inst{11} = ae_osa16{3}; +} + + + +def AE_TRUNCI32F64S_L : AE_TRUNCI32F64S_L_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunci32f64s_l AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, timm:$ae_osa16))]>; + +class AE_TRUNCI32X2F64S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, uimm4:$ae_osa16), "ae_trunci32x2f64s $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; +let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ae_osa16{0}; +let Inst{9} = ae_osa16{1}; +let Inst{10} = 
ae_osa16{2}; +let Inst{11} = ae_osa16{3}; +} + + + +def AE_TRUNCI32X2F64S : AE_TRUNCI32X2F64S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunci32x2f64s AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, timm:$ae_osa16))]>; + +class AE_VLDL16C_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C : AE_VLDL16C_X24<[(set AR:$ars_out, (int_xtensa_ae_vldl16c AR:$ars))]>; + +class AE_VLDL16C_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C_IC : AE_VLDL16C_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_vldl16c_ic AR:$ars))]>; + +class AE_VLDL16C_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C_IP : AE_VLDL16C_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_vldl16c_ip AR:$ars))]>; + +class AE_VLDL16T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art), (ins AR:$ars), "ae_vldl16t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16T : AE_VLDL16T_X24<[]>; + +class AE_VLDL32T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art), (ins AR:$ars), "ae_vldl32t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let 
Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL32T : AE_VLDL32T_X24<[]>; + +class AE_VLDSHT_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "ae_vldsht $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_VLDSHT : AE_VLDSHT_X24<[(int_xtensa_ae_vldsht AR:$art)]>; + +class AE_VLEL16T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art_out), (ins AR:$art, AR:$ars), "ae_vlel16t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; +let Constraints = "$art = $art_out,@earlyclobber $art_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLEL16T : AE_VLEL16T_X24<[]>; + +class AE_VLEL32T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art_out), (ins AR:$art, AR:$ars), "ae_vlel32t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; +let Constraints = "$art = $art_out,@earlyclobber $art_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLEL32T : AE_VLEL32T_X24<[]>; + +class AE_VLES16C_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLES16C : AE_VLES16C_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c AR:$ars))]>; + +class AE_VLES16C_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; 
+let Inst{11} = ars{3}; +} + + + +def AE_VLES16C_IC : AE_VLES16C_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c_ic AR:$ars))]>; + +class AE_VLES16C_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLES16C_IP : AE_VLES16C_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c_ip AR:$ars))]>; + +class AE_XOR_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_xor $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_XOR : AE_XOR_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_xor AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_ZALIGN64_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_uu_uu), (ins ), "ae_zalign64 $ae_uu_uu", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_uu_uu; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{6} = ae_uu_uu{0}; +let Inst{7} = ae_uu_uu{1}; +} + + + +def AE_ZALIGN64 : AE_ZALIGN64_X24<[(set AE_VALIGN:$ae_uu_uu, (int_xtensa_ae_zalign64 ))]>; + +class RUR_AE_BITHEAD_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_bithead $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_BITHEAD : RUR_AE_BITHEAD_X24<[(set AR:$arr, (int_xtensa_rur_ae_bithead ))]>; + +class RUR_AE_BITPTR_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_bitptr $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; 
+let Inst{7} = art{3}; +} + + + +def RUR_AE_BITPTR : RUR_AE_BITPTR_X24<[(set AR:$art, (int_xtensa_rur_ae_bitptr ))]>; + +class RUR_AE_BITSUSED_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_bitsused $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_BITSUSED : RUR_AE_BITSUSED_X24<[(set AR:$art, (int_xtensa_rur_ae_bitsused ))]>; + +class RUR_AE_CBEGIN0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cbegin0 $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CBEGIN0 : RUR_AE_CBEGIN0_X24<[(set AR:$arr, (int_xtensa_rur_ae_cbegin0 ))]>; + +class RUR_AE_CEND0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cend0 $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CEND0 : RUR_AE_CEND0_X24<[(set AR:$arr, (int_xtensa_rur_ae_cend0 ))]>; + +class RUR_AE_CW_SD_NO_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cw_sd_no $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CW_SD_NO : RUR_AE_CW_SD_NO_X24<[(set AR:$arr, (int_xtensa_rur_ae_cw_sd_no ))]>; + +class RUR_AE_CWRAP_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_cwrap $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_CWRAP : RUR_AE_CWRAP_X24<[(set AR:$art, (int_xtensa_rur_ae_cwrap ))]>; + +class RUR_AE_FIRST_TS_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_first_ts $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + 
+//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_FIRST_TS : RUR_AE_FIRST_TS_X24<[(set AR:$art, (int_xtensa_rur_ae_first_ts ))]>; + +class RUR_AE_NEXTOFFSET_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_nextoffset $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_NEXTOFFSET : RUR_AE_NEXTOFFSET_X24<[(set AR:$art, (int_xtensa_rur_ae_nextoffset ))]>; + +class RUR_AE_OVERFLOW_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_overflow $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_OVERFLOW : RUR_AE_OVERFLOW_X24<[(set AR:$art, (int_xtensa_rur_ae_overflow ))]>; + +class RUR_AE_OVF_SAR_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_ovf_sar $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_OVF_SAR : RUR_AE_OVF_SAR_X24<[(set AR:$arr, (int_xtensa_rur_ae_ovf_sar ))]>; + +class RUR_AE_SAR_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_SAR : RUR_AE_SAR_X24<[(set AR:$art, (int_xtensa_rur_ae_sar ))]>; + +class RUR_AE_SEARCHDONE_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_searchdone $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_SEARCHDONE : 
RUR_AE_SEARCHDONE_X24<[(set AR:$art, (int_xtensa_rur_ae_searchdone ))]>; + +class RUR_AE_TABLESIZE_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_tablesize $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_TABLESIZE : RUR_AE_TABLESIZE_X24<[(set AR:$art, (int_xtensa_rur_ae_tablesize ))]>; + +class RUR_AE_TS_FTS_BU_BP_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_ts_fts_bu_bp $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{5} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_TS_FTS_BU_BP : RUR_AE_TS_FTS_BU_BP_X24<[(set AR:$arr, (int_xtensa_rur_ae_ts_fts_bu_bp ))]>; + +class WUR_AE_BITHEAD_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bithead $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_BITHEAD : WUR_AE_BITHEAD_X24<[(int_xtensa_wur_ae_bithead AR:$art)]>; + +class WUR_AE_BITPTR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bitptr $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_BITPTR : WUR_AE_BITPTR_X24<[(int_xtensa_wur_ae_bitptr AR:$art)]>; + +class WUR_AE_BITSUSED_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bitsused $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_BITSUSED : WUR_AE_BITSUSED_X24<[(int_xtensa_wur_ae_bitsused AR:$art)]>; + +class WUR_AE_CBEGIN0_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cbegin0 $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{9} = 1; +let 
Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CBEGIN0 : WUR_AE_CBEGIN0_X24<[(int_xtensa_wur_ae_cbegin0 AR:$art)]>; + +class WUR_AE_CEND0_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cend0 $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CEND0 : WUR_AE_CEND0_X24<[(int_xtensa_wur_ae_cend0 AR:$art)]>; + +class WUR_AE_CW_SD_NO_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cw_sd_no $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CW_SD_NO : WUR_AE_CW_SD_NO_X24<[(int_xtensa_wur_ae_cw_sd_no AR:$art)]>; + +class WUR_AE_CWRAP_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cwrap $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CWRAP : WUR_AE_CWRAP_X24<[(int_xtensa_wur_ae_cwrap AR:$art)]>; + +class WUR_AE_FIRST_TS_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_first_ts $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_FIRST_TS : WUR_AE_FIRST_TS_X24<[(int_xtensa_wur_ae_first_ts AR:$art)]>; + +class WUR_AE_NEXTOFFSET_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_nextoffset $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = 
art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_NEXTOFFSET : WUR_AE_NEXTOFFSET_X24<[(int_xtensa_wur_ae_nextoffset AR:$art)]>; + +class WUR_AE_OVERFLOW_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_overflow $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_OVERFLOW : WUR_AE_OVERFLOW_X24<[(int_xtensa_wur_ae_overflow AR:$art)]>; + +class WUR_AE_OVF_SAR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_ovf_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_OVF_SAR : WUR_AE_OVF_SAR_X24<[(int_xtensa_wur_ae_ovf_sar AR:$art)]>; + +class WUR_AE_SAR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_SAR : WUR_AE_SAR_X24<[(int_xtensa_wur_ae_sar AR:$art)]>; + +class WUR_AE_SEARCHDONE_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_searchdone $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_SEARCHDONE : WUR_AE_SEARCHDONE_X24<[(int_xtensa_wur_ae_searchdone AR:$art)]>; + +class WUR_AE_TABLESIZE_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_tablesize $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_TABLESIZE : WUR_AE_TABLESIZE_X24<[(int_xtensa_wur_ae_tablesize AR:$art)]>; + +class WUR_AE_TS_FTS_BU_BP_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_ts_fts_bu_bp $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; 
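+// Note on the RUR_AE_* / WUR_AE_* classes in this block: they all share the
+// same 24-bit layout. The fixed opcode bits are set individually via
+// `let Inst{N} = 1`, and the single AR operand occupies a 4-bit field
+// (Inst{12-15} for an $arr operand, Inst{4-7} for an $art operand).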
+ + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_TS_FTS_BU_BP : WUR_AE_TS_FTS_BU_BP_X24<[(int_xtensa_wur_ae_ts_fts_bu_bp AR:$art)]>; diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td new file mode 100644 index 0000000000000..fc60c8b7c10e7 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td @@ -0,0 +1,189 @@ +//===- XtensaHIFIInstrPatterns.td - Tablegen patterns for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen code generation patterns for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +def addr64n_56: ComplexPattern", [frameindex]>; + +def addr32n_28: ComplexPattern", [frameindex]>; + +let Predicates = [HasHIFI3] in { + +def : Pat<(v2i32 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L32X2_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v2i32:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S32X2_I v2i32:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v1i64 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L64_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v1i64:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S64_I v1i64:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v4i16 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L16X4_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v4i16:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S16X4_I v4i16:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v1i32 (load (addr32n_28 AR:$a, imm32n_28:$imm))), + (AE_L32_I AR:$a, imm32n_28:$imm)>; + +def : Pat<(store v1i32:$v, (addr32n_28 AR:$a, imm32n_28:$imm)), + (AE_S32_L_I v1i32:$v, AR:$a, imm32n_28:$imm)>; + + +def : Pat<(v8i8 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_LALIGN64_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store AE_VALIGN:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_SALIGN64_I AE_VALIGN:$v, AR:$a, imm64n_56:$imm)>; + + +def : Pat<(v2i32 (build_vector AR:$v1, AR:$v2)), + (AE_MOVDA32X2 AR:$v2, AR:$v1)>; + +def : Pat<(v2i32 (build_vector AR:$a, AR:$a)), + (AE_MOVDA32 AR:$a)>; + +/* Build const i64 vector when const fit in [-16,47]*/ +def : Pat<(v1i64 (Xtensa_build_vec imm16n_47:$a)), + (AE_SRLI64 (AE_MOVI imm16n_47:$a), (i32 32))>; + +/* Build const i64 vector with 32-bit const */ +def : Pat<(v1i64 (Xtensa_build_vec AR:$a)), + (AE_SRLI64 (AE_MOVDA32 AR:$a), (i32 32))>; + + +def : Pat<(v1i32 (build_vector AR:$a)), + (AE_MOVDA32 AR:$a)>; + +def : Pat<(v4i16 (build_vector AR:$a, AR:$a, AR:$a, AR:$a)), + (AE_MOVDA16 AR:$a)>; + +def : Pat<(v4i16 (build_vector AR:$v1, AR:$v2, AR:$v1, AR:$v2)), + (AE_MOVDA16X2 AR:$v1, AR:$v2)>; + +def : Pat<(v4i16 (build_vector AR:$v1, AR:$v2, AR:$v3, AR:$v4)), + (AE_OR + (AE_SLAI64 (AE_MOVDA16X2 AR:$v1, AR:$v2), 32), + (AE_MOVDA16X2 AR:$v3, AR:$v4) + )>; + +def : Pat<(i32 (extractelt v2i32:$v1, (i32 0))), + (AE_MOVAD32_L AE_DR:$v1)>; + +def : 
Pat<(i32 (extractelt v2i32:$v1, (i32 1))), + (AE_MOVAD32_H AE_DR:$v1)>; + +def : Pat<(i32 (extractelt v1i32:$v1, (i32 0))), + (AE_MOVAD32_L AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 0))), + (AE_MOVAD16_0 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 1))), + (AE_MOVAD16_1 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 2))), + (AE_MOVAD16_2 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 3))), + (AE_MOVAD16_3 AE_DR:$v1)>; + +def : Pat<(v1i32 (extract_subvector v2i32:$v1, (i32 0))), + (AE_MOVDA32 (AE_MOVAD32_L AE_DR:$v1))>; +} + +class CAST_PAT + : Pat<(dst_vt (bitconvert src_vt:$v)), + (COPY_TO_REGCLASS AE_DR:$v, AE_DR)>; + +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; + +def : Pat<(v1i64 (anyext v1i32:$src)), + (AE_SRLI64 v1i32:$src, (i32 32))>; + +def : Pat<(v1i64 (zext v1i32:$src)), + (AE_SRLI64 v1i32:$src, (i32 32))>; + +def : Pat<(v1i64 (sext v1i32:$src)), + (AE_SRAI64 v1i32:$src, (i32 32))>; + +/* +class BIN_PAT + : Pat<(node src_vt:$f1, src_vt:$f2), + (inst dst_vt:$f1, dst_vt:$f2)>; +*/ +foreach VT = AE_DR.RegTypes in { + def : BIN_PAT; + def : BIN_PAT; + def : BIN_PAT; +} + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +/* SELECT and SETCC patterns */ +foreach VT = AE_DR.RegTypes in { + def : Pat<(VT (select v1i1:$cc, AE_DR:$t, AE_DR:$f)), + (AE_MOVT64 AE_DR:$t, AE_DR:$f, v1i1:$cc)>; +} +def : Pat<(f32 (int_xtensa_xt_movt_s FPR:$t, FPR:$f, v1i1:$cc)), + (MOVT_S FPR:$t, FPR:$f, v1i1:$cc)>,Requires<[HasSingleFloat]>; + +def : Pat<(f32 (select v1i1:$cc, FPR:$t, FPR:$f)), + (MOVT_S FPR:$t, FPR:$f, v1i1:$cc)>,Requires<[HasSingleFloat]>; + +class SELECTCC_VEC_INT + : Pat<(vt (selectcc i32:$lhs, i32:$rhs, AE_DR:$t, AE_DR:$f, cond)), + (mov AE_DR:$t, AE_DR:$f, (cmp (AE_MOVDA32 AR:$lhs), + (AE_MOVDA32 AR:$rhs)))>; + +foreach vt = [v2i32,v1i32,v1i64,v4i16] in { + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; +} diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIIntrinsics.inc b/llvm/lib/Target/Xtensa/XtensaHIFIIntrinsics.inc new file mode 100644 index 0000000000000..f1cfbd3571627 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIIntrinsics.inc @@ -0,0 +1,227 @@ +//===- XtensaHIFIIntrinsics.td - Xtensa HIFI intrinsics database for LLVM ISEL +//-*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen instruction formats for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +case Intrinsic::xtensa_ae_l16_ip: +return Xtensa::AE_L16_IP; +case Intrinsic::xtensa_ae_l16_xc: +return Xtensa::AE_L16_XC; +case Intrinsic::xtensa_ae_l16_xp: +return Xtensa::AE_L16_XP; +case Intrinsic::xtensa_ae_l16m_iu: +return Xtensa::AE_L16M_IU; +case Intrinsic::xtensa_ae_l16m_xc: +return Xtensa::AE_L16M_XC; +case Intrinsic::xtensa_ae_l16m_xu: +return Xtensa::AE_L16M_XU; +case Intrinsic::xtensa_ae_l16x2m_iu: +return Xtensa::AE_L16X2M_IU; +case Intrinsic::xtensa_ae_l16x2m_xc: +return Xtensa::AE_L16X2M_XC; +case Intrinsic::xtensa_ae_l16x2m_xu: +return Xtensa::AE_L16X2M_XU; +case Intrinsic::xtensa_ae_l16x4_ip: +return Xtensa::AE_L16X4_IP; +case Intrinsic::xtensa_ae_l16x4_ric: +return Xtensa::AE_L16X4_RIC; +case Intrinsic::xtensa_ae_l16x4_rip: +return Xtensa::AE_L16X4_RIP; +case Intrinsic::xtensa_ae_l16x4_xc: +return Xtensa::AE_L16X4_XC; +case Intrinsic::xtensa_ae_l16x4_xp: +return Xtensa::AE_L16X4_XP; +case Intrinsic::xtensa_ae_l32_ip: +return Xtensa::AE_L32_IP; +case Intrinsic::xtensa_ae_l32_xc: +return Xtensa::AE_L32_XC; +case Intrinsic::xtensa_ae_l32_xp: +return Xtensa::AE_L32_XP; +case Intrinsic::xtensa_ae_l32f24_ip: +return Xtensa::AE_L32F24_IP; +case Intrinsic::xtensa_ae_l32f24_xc: +return Xtensa::AE_L32F24_XC; +case Intrinsic::xtensa_ae_l32f24_xp: +return Xtensa::AE_L32F24_XP; +case Intrinsic::xtensa_ae_l32m_iu: +return Xtensa::AE_L32M_IU; +case Intrinsic::xtensa_ae_l32m_xc: +return Xtensa::AE_L32M_XC; +case Intrinsic::xtensa_ae_l32m_xu: +return Xtensa::AE_L32M_XU; +case Intrinsic::xtensa_ae_l32x2_ip: +return Xtensa::AE_L32X2_IP; +case Intrinsic::xtensa_ae_l32x2_ric: +return Xtensa::AE_L32X2_RIC; +case Intrinsic::xtensa_ae_l32x2_rip: +return Xtensa::AE_L32X2_RIP; +case Intrinsic::xtensa_ae_l32x2_xc: +return Xtensa::AE_L32X2_XC; +case Intrinsic::xtensa_ae_l32x2_xp: +return Xtensa::AE_L32X2_XP; +case Intrinsic::xtensa_ae_l32x2f24_ip: +return Xtensa::AE_L32X2F24_IP; +case Intrinsic::xtensa_ae_l32x2f24_ric: +return Xtensa::AE_L32X2F24_RIC; +case Intrinsic::xtensa_ae_l32x2f24_rip: +return Xtensa::AE_L32X2F24_RIP; +case Intrinsic::xtensa_ae_l32x2f24_xc: +return Xtensa::AE_L32X2F24_XC; +case Intrinsic::xtensa_ae_l32x2f24_xp: +return Xtensa::AE_L32X2F24_XP; +case Intrinsic::xtensa_ae_l64_ip: +return Xtensa::AE_L64_IP; +case Intrinsic::xtensa_ae_l64_xc: +return Xtensa::AE_L64_XC; +case Intrinsic::xtensa_ae_l64_xp: +return Xtensa::AE_L64_XP; +case Intrinsic::xtensa_ae_la16x4_ic: +return Xtensa::AE_LA16X4_IC; +case Intrinsic::xtensa_ae_la16x4_ip: +return Xtensa::AE_LA16X4_IP; +case Intrinsic::xtensa_ae_la16x4_ric: +return Xtensa::AE_LA16X4_RIC; +case Intrinsic::xtensa_ae_la16x4_rip: +return Xtensa::AE_LA16X4_RIP; +case Intrinsic::xtensa_ae_la16x4neg_pc: +return Xtensa::AE_LA16X4NEG_PC; +case Intrinsic::xtensa_ae_la16x4pos_pc: +return Xtensa::AE_LA16X4POS_PC; +case Intrinsic::xtensa_ae_la24_ic: +return Xtensa::AE_LA24_IC; +case Intrinsic::xtensa_ae_la24_ip: +return Xtensa::AE_LA24_IP; +case Intrinsic::xtensa_ae_la24_ric: +return Xtensa::AE_LA24_RIC; +case Intrinsic::xtensa_ae_la24_rip: +return Xtensa::AE_LA24_RIP; +case Intrinsic::xtensa_ae_la24neg_pc: +return Xtensa::AE_LA24NEG_PC; +case Intrinsic::xtensa_ae_la24pos_pc: +return Xtensa::AE_LA24POS_PC; +case 
Intrinsic::xtensa_ae_la24x2_ic: +return Xtensa::AE_LA24X2_IC; +case Intrinsic::xtensa_ae_la24x2_ip: +return Xtensa::AE_LA24X2_IP; +case Intrinsic::xtensa_ae_la24x2_ric: +return Xtensa::AE_LA24X2_RIC; +case Intrinsic::xtensa_ae_la24x2_rip: +return Xtensa::AE_LA24X2_RIP; +case Intrinsic::xtensa_ae_la24x2neg_pc: +return Xtensa::AE_LA24X2NEG_PC; +case Intrinsic::xtensa_ae_la24x2pos_pc: +return Xtensa::AE_LA24X2POS_PC; +case Intrinsic::xtensa_ae_la32x2_ic: +return Xtensa::AE_LA32X2_IC; +case Intrinsic::xtensa_ae_la32x2_ip: +return Xtensa::AE_LA32X2_IP; +case Intrinsic::xtensa_ae_la32x2_ric: +return Xtensa::AE_LA32X2_RIC; +case Intrinsic::xtensa_ae_la32x2_rip: +return Xtensa::AE_LA32X2_RIP; +case Intrinsic::xtensa_ae_la32x2f24_ic: +return Xtensa::AE_LA32X2F24_IC; +case Intrinsic::xtensa_ae_la32x2f24_ip: +return Xtensa::AE_LA32X2F24_IP; +case Intrinsic::xtensa_ae_la32x2f24_ric: +return Xtensa::AE_LA32X2F24_RIC; +case Intrinsic::xtensa_ae_la32x2f24_rip: +return Xtensa::AE_LA32X2F24_RIP; +case Intrinsic::xtensa_ae_la32x2neg_pc: +return Xtensa::AE_LA32X2NEG_PC; +case Intrinsic::xtensa_ae_la32x2pos_pc: +return Xtensa::AE_LA32X2POS_PC; +case Intrinsic::xtensa_ae_mul16x4: +return Xtensa::AE_MUL16X4; +case Intrinsic::xtensa_ae_mula16x4: +return Xtensa::AE_MULA16X4; +case Intrinsic::xtensa_ae_mulaf16x4ss: +return Xtensa::AE_MULAF16X4SS; +case Intrinsic::xtensa_ae_mulafd24x2_fir_h: +return Xtensa::AE_MULAFD24X2_FIR_H; +case Intrinsic::xtensa_ae_mulafd24x2_fir_l: +return Xtensa::AE_MULAFD24X2_FIR_L; +case Intrinsic::xtensa_ae_mulafd32x16x2_fir_hh: +return Xtensa::AE_MULAFD32X16X2_FIR_HH; +case Intrinsic::xtensa_ae_mulafd32x16x2_fir_hl: +return Xtensa::AE_MULAFD32X16X2_FIR_HL; +case Intrinsic::xtensa_ae_mulafd32x16x2_fir_lh: +return Xtensa::AE_MULAFD32X16X2_FIR_LH; +case Intrinsic::xtensa_ae_mulafd32x16x2_fir_ll: +return Xtensa::AE_MULAFD32X16X2_FIR_LL; +case Intrinsic::xtensa_ae_mulf16x4ss: +return Xtensa::AE_MULF16X4SS; +case Intrinsic::xtensa_ae_mulfd24x2_fir_h: +return Xtensa::AE_MULFD24X2_FIR_H; +case Intrinsic::xtensa_ae_mulfd24x2_fir_l: +return Xtensa::AE_MULFD24X2_FIR_L; +case Intrinsic::xtensa_ae_mulfd32x16x2_fir_hh: +return Xtensa::AE_MULFD32X16X2_FIR_HH; +case Intrinsic::xtensa_ae_mulfd32x16x2_fir_hl: +return Xtensa::AE_MULFD32X16X2_FIR_HL; +case Intrinsic::xtensa_ae_mulfd32x16x2_fir_lh: +return Xtensa::AE_MULFD32X16X2_FIR_LH; +case Intrinsic::xtensa_ae_mulfd32x16x2_fir_ll: +return Xtensa::AE_MULFD32X16X2_FIR_LL; +case Intrinsic::xtensa_ae_muls16x4: +return Xtensa::AE_MULS16X4; +case Intrinsic::xtensa_ae_mulsf16x4ss: +return Xtensa::AE_MULSF16X4SS; +case Intrinsic::xtensa_ae_sa16x4_ic: +return Xtensa::AE_SA16X4_IC; +case Intrinsic::xtensa_ae_sa16x4_ip: +return Xtensa::AE_SA16X4_IP; +case Intrinsic::xtensa_ae_sa16x4_ric: +return Xtensa::AE_SA16X4_RIC; +case Intrinsic::xtensa_ae_sa16x4_rip: +return Xtensa::AE_SA16X4_RIP; +case Intrinsic::xtensa_ae_sa24_l_ic: +return Xtensa::AE_SA24_L_IC; +case Intrinsic::xtensa_ae_sa24_l_ip: +return Xtensa::AE_SA24_L_IP; +case Intrinsic::xtensa_ae_sa24_l_ric: +return Xtensa::AE_SA24_L_RIC; +case Intrinsic::xtensa_ae_sa24_l_rip: +return Xtensa::AE_SA24_L_RIP; +case Intrinsic::xtensa_ae_sa24x2_ic: +return Xtensa::AE_SA24X2_IC; +case Intrinsic::xtensa_ae_sa24x2_ip: +return Xtensa::AE_SA24X2_IP; +case Intrinsic::xtensa_ae_sa24x2_ric: +return Xtensa::AE_SA24X2_RIC; +case Intrinsic::xtensa_ae_sa24x2_rip: +return Xtensa::AE_SA24X2_RIP; +case Intrinsic::xtensa_ae_sa32x2_ic: +return Xtensa::AE_SA32X2_IC; +case Intrinsic::xtensa_ae_sa32x2_ip: +return Xtensa::AE_SA32X2_IP; 
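+// The case/return pairs in this file are not a standalone function body; they
+// are intended to be textually #include'd inside a switch over the intrinsic
+// ID. A minimal sketch of the consumer, modeled on GetXtensaIntrinsic() in the
+// XtensaISelDAGToDAG.cpp hunk further down (the <unsigned> result type is an
+// assumption; the angle brackets appear to have been stripped in that hunk):
+//
+//   static std::optional<unsigned> GetXtensaIntrinsic(unsigned IntNo) {
+//     switch (IntNo) {
+// #include "XtensaHIFIIntrinsics.inc"
+//     default:
+//       return std::nullopt;
+//     }
+//   }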
+case Intrinsic::xtensa_ae_sa32x2_ric: +return Xtensa::AE_SA32X2_RIC; +case Intrinsic::xtensa_ae_sa32x2_rip: +return Xtensa::AE_SA32X2_RIP; +case Intrinsic::xtensa_ae_sa32x2f24_ic: +return Xtensa::AE_SA32X2F24_IC; +case Intrinsic::xtensa_ae_sa32x2f24_ip: +return Xtensa::AE_SA32X2F24_IP; +case Intrinsic::xtensa_ae_sa32x2f24_ric: +return Xtensa::AE_SA32X2F24_RIC; +case Intrinsic::xtensa_ae_sa32x2f24_rip: +return Xtensa::AE_SA32X2F24_RIP; +case Intrinsic::xtensa_ae_vldl16t: +return Xtensa::AE_VLDL16T; +case Intrinsic::xtensa_ae_vldl32t: +return Xtensa::AE_VLDL32T; +case Intrinsic::xtensa_ae_vlel16t: +return Xtensa::AE_VLEL16T; +case Intrinsic::xtensa_ae_vlel32t: +return Xtensa::AE_VLEL32T; diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 481cf3b8ce4a5..7ab2bc04280e3 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -20,6 +20,7 @@ #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include using namespace llvm; @@ -119,6 +120,44 @@ class XtensaDAGToDAGISel : public SelectionDAGISel { return selectMemRegAddr(Addr, Base, Offset, 4); } + template + bool selectMemRegImm(SDValue Addr, SDValue &Base, SDValue &Offset) { + EVT ValTy = Addr.getValueType(); + // if Address is FI, get the TargetFrameIndex. + if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), ValTy); + + return true; + } + if (TM.isPositionIndependent()) + report_fatal_error("PIC relocations is not supported"); + + if ((Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress)) + return false; + + if (CurDAG->isBaseWithConstantOffset(Addr)) { + ConstantSDNode *CN = dyn_cast(Addr.getOperand(1)); + int64_t OffsetVal = CN->getSExtValue(); + if (((OffsetVal % std::abs(Scale)) == 0) && (OffsetVal >= Low) && + (OffsetVal <= High)) { + if (FrameIndexSDNode *FIN = + dyn_cast(Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + else + Base = Addr.getOperand(0); + Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(Addr), + Addr.getValueType()); + return true; + } + } + // Last case + Base = Addr; + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Addr.getValueType()); + return true; + } + // Include the pieces autogenerated from the target description. 
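+  // A note on selectMemRegImm<Low, High, Scale> above: it accepts a frame
+  // index, or a base plus a constant offset that is a multiple of |Scale| and
+  // lies in [Low, High]; any other address falls back to (Addr, 0). The HIFI
+  // ComplexPatterns addr64n_56 / addr32n_28 in XtensaHIFIInstrPatterns.td are
+  // presumably instantiations along the lines of selectMemRegImm<-64, 56, 8>
+  // and selectMemRegImm<-32, 28, 4>; their template arguments are elided in
+  // that hunk, so the exact bounds are an assumption from the operand names.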
#include "XtensaGenDAGISel.inc" }; // namespace @@ -131,6 +170,20 @@ FunctionPass *llvm::createXtensaISelDag(XtensaTargetMachine &TM, return new XtensaDAGToDAGISel(TM, OptLevel); } +static std::optional GetXtensaIntrinsic(unsigned IntNo) { + switch (IntNo) { +#include "XtensaHIFIIntrinsics.inc" + case Intrinsic::xtensa_xt_lsxp: + return Xtensa::LSXP; + break; + case Intrinsic::xtensa_xt_lsip: + return Xtensa::LSIP; + break; + default: + return std::nullopt; + } +} + void XtensaDAGToDAGISel::Select(SDNode *Node) { unsigned Opcode = Node->getOpcode(); SDLoc DL(Node); @@ -382,6 +435,51 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { } break; } + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntNo = cast(Node->getOperand(0))->getZExtValue(); + auto OpCodeRes = GetXtensaIntrinsic(IntNo); + unsigned OpCode; + if (OpCodeRes) { + OpCode = *OpCodeRes; + } else { + break; + } + + auto ResTys = Node->getVTList(); + + SmallVector Ops; + for (unsigned i = 1; i < Node->getNumOperands(); i++) + Ops.push_back(Node->getOperand(i)); + + SDNode *NewNode = CurDAG->getMachineNode(OpCode, DL, ResTys, Ops); + + ReplaceNode(Node, NewNode); + return; + } + case ISD::INTRINSIC_W_CHAIN: { + unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned OpCode = 0; + auto OpCodeRes = GetXtensaIntrinsic(IntNo); + if (OpCodeRes) { + OpCode = *OpCodeRes; + } else { + break; + } + + SDValue Chain = Node->getOperand(0); + auto ResTys = Node->getVTList(); + + SmallVector Ops; + for (unsigned i = 2; i < Node->getNumOperands(); i++) + Ops.push_back(Node->getOperand(i)); + Ops.push_back(Chain); + + SDNode *NewNode = CurDAG->getMachineNode(OpCode, DL, ResTys, Ops); + + ReplaceNode(Node, NewNode); + return; + } + default: break; } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 68bf23ec2da85..e248cde518ffc 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -36,6 +36,24 @@ using namespace llvm; static const MCPhysReg XtensaArgRegs[6] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, Xtensa::A5, Xtensa::A6, Xtensa::A7}; +static const MCPhysReg VecRegs[] = {Xtensa::AED0, Xtensa::AED1, Xtensa::AED2, + Xtensa::AED3}; + +static const MVT VectorIntTypes[] = { + MVT::v2i32, + MVT::v1i32, + MVT::v4i16, + MVT::v1i64, +}; + +template static bool isVecVT(VT ValVT) { + for (const auto &V : VectorIntTypes) { + auto VV = VT(V); + if (VV == ValVT) + return true; + } + return false; +} // Return true if we must use long (in fact, indirect) function call. // It's simplified version, production implimentation must // resolve a functions in ROM (usually glibc functions) @@ -57,6 +75,7 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &tm, const XtensaSubtarget &STI) : TargetLowering(tm), Subtarget(STI) { MVT PtrVT = MVT::i32; + // Set up the register classes. 
addRegisterClass(MVT::i32, &Xtensa::ARRegClass); @@ -64,6 +83,44 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &tm, addRegisterClass(MVT::f32, &Xtensa::FPRRegClass); } + if (Subtarget.hasBoolean()) { + addRegisterClass(MVT::v1i1, &Xtensa::BRRegClass); + addRegisterClass(MVT::v2i1, &Xtensa::BR2RegClass); + addRegisterClass(MVT::v4i1, &Xtensa::BR4RegClass); + setOperationAction(ISD::Constant, MVT::v2i1, Expand); + setOperationAction(ISD::Constant, MVT::v1i1, Expand); + setTargetDAGCombine(ISD::STORE); + setTargetDAGCombine(ISD::BITCAST); + setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); + + setOperationAction(ISD::STORE, MVT::v1i1, Legal); + setOperationAction(ISD::STORE, MVT::v2i1, Legal); + setOperationAction(ISD::STORE, MVT::v4i1, Legal); + setOperationAction(ISD::LOAD, MVT::v1i1, Legal); + setOperationAction(ISD::LOAD, MVT::v2i1, Legal); + setOperationAction(ISD::LOAD, MVT::v4i1, Legal); + } + + if (Subtarget.hasHIFI3()) { + for (MVT VT : VectorIntTypes) { + addRegisterClass(VT, &Xtensa::AE_DRRegClass); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); + // handle bicast v8i8 to VEC_VT + setOperationAction(ISD::BITCAST, VT, Custom); + } + addRegisterClass(MVT::v8i8, &Xtensa::AE_VALIGNRegClass); + // handle bicast VEC_VT to v8i8 + setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); + + setOperationAction(ISD::SIGN_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::ZERO_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::ANY_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Legal); + + setTargetDAGCombine(ISD::BUILD_VECTOR); + setOperationAction(ISD::MUL, MVT::v1i64, Expand); + } + // Set up special registers. setStackPointerRegisterToSaveRestore(Xtensa::SP); @@ -95,7 +152,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &tm, // Used by legalize types to correctly generate the setcc result. 
// AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); - setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + if (!Subtarget.hasBoolean()) + setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32); setOperationAction(ISD::BR_CC, MVT::i32, Legal); @@ -368,6 +426,33 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &tm, } } + for (MVT VT : MVT::fixedlen_vector_valuetypes()) { + if (isTypeLegal(VT)) { + setOperationAction(ISD::CTPOP, VT, Expand); + setOperationAction(ISD::SRL, VT, Expand); + setOperationAction(ISD::SRA, VT, Expand); + setOperationAction(ISD::SHL, VT, Expand); + + // Expand all divisions and remainders for vectors + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + } + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + + setOperationAction(ISD::SELECT_CC, VT, Custom); + setOperationAction(ISD::SETCC, VT, Custom); + + // Disable all narrowing stores and extending loads for vectors + for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { + setTruncStoreAction(VT, InnerVT, Expand); + setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); + setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + } + } // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); } @@ -736,6 +821,107 @@ static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, LHS, RHS, Dest); } +static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + SDLoc DL(N); + EVT VT = N->getValueType(0); + SDValue Op0 = N->getOperand(0); + ConstantSDNode *Const = dyn_cast(Op0); + if (VT == MVT::v1i64 && Const) { + int64_t Val = Const->getSExtValue(); + if (Val <= std::numeric_limits::max()) + return DAG.getNode(XtensaISD::BUILD_VEC, DL, MVT::v1i64, + DAG.getConstant(Val, DL, MVT::i32)); + } + return SDValue(); +} + +static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (vNi1 (bitcast (iN (trunc i32)))) -> (vNi1 (xtensa_bitcast i32)) + SDLoc DL(N); + SDValue Op = N->getOperand(0); + + if (N->getOpcode() != ISD::BITCAST || Op.getOpcode() != ISD::TRUNCATE) + return SDValue(); + + SDValue Int = Op.getOperand(0); + llvm::EVT BoolVT = N->getValueType(0); + + if (!BoolVT.isVector() || BoolVT.getVectorElementType() != MVT::i1 || + Int.getValueType() != MVT::i32) + return SDValue(); + + SDValue Trunc = DAG.getNode(XtensaISD::TRUNC, DL, BoolVT, Int); + + return Trunc; +} + +static SDValue +PerformExtractSubvectorCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (vNi1 (extract_subvector (v8i1 (load x))) -> (vNi1 (load x)) + SDLoc DL(N); + SDValue Load = N->getOperand(0); + + if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR) + return SDValue(); + + EVT LoadVT = Load.getValueType(); + EVT BoolVT = N->getValueType(0); + + if (!BoolVT.isVector() || BoolVT.getVectorElementType() != MVT::i1) + return SDValue(); + + if (Load.getOpcode() != ISD::LOAD) + return SDValue(); + + LoadSDNode *LdNode = cast(Load.getNode()); + + if (!LoadVT.isVector() || LoadVT.getVectorElementType() != MVT::i1) + return SDValue(); + + SDValue NewLoad = + 
DAG.getLoad(BoolVT, DL, LdNode->getChain(), LdNode->getBasePtr(), + LdNode->getPointerInfo(), LdNode->getOriginalAlign(), + LdNode->getMemOperand()->getFlags()); + + return NewLoad; +} +static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (store (v8i1 (concat_vector (vNi1 elt) undef )) addr off) + // -> (store (vNi1 elt) addr off) + SDLoc DL(N); + + if (N->getOpcode() != ISD::STORE) + return SDValue(); + + StoreSDNode *StNode = cast(N); + + SDValue Concat = N->getOperand(1); + EVT BoolVT = Concat.getValueType(); + + if ((Concat.getOpcode() != ISD::CONCAT_VECTORS) || !BoolVT.isVector() || + (BoolVT.getVectorElementType() != MVT::i1)) + return SDValue(); + + SDValue Val = Concat.getNode()->getOperand(0); + EVT ValVT = Val.getValueType(); + + if (!ValVT.isVector() || ValVT.getVectorElementType() != MVT::i1 || + ValVT.getSizeInBits() > 8) { + return SDValue(); + } + + return DAG.getStore(StNode->getChain(), DL, Val, StNode->getBasePtr(), + StNode->getMemOperand()); +} + SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -752,6 +938,14 @@ SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, return PerformHWLoopCombine(N, DAG, DCI, Subtarget); case ISD::BRCOND: return PerformBRCONDCombine(N, DAG, DCI, Subtarget); + case ISD::BUILD_VECTOR: + return PerformBUILD_VECTORCombine(N, DAG, DCI, Subtarget); + case ISD::BITCAST: + return PerformBITCASTCombine(N, DAG, DCI, Subtarget); + case ISD::EXTRACT_SUBVECTOR: + return PerformExtractSubvectorCombine(N, DAG, DCI, Subtarget); + case ISD::STORE: + return PerformSTORECombine(N, DAG, DCI, Subtarget); } return SDValue(); @@ -768,6 +962,17 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, ISD::ArgFlagsTy ArgFlags, CCState &State) { static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, Xtensa::A5, Xtensa::A6, Xtensa::A7}; + static const MCPhysReg BoolRegs[] = { + Xtensa::B0, Xtensa::B1, Xtensa::B2, Xtensa::B3, + Xtensa::B4, Xtensa::B5, Xtensa::B6, Xtensa::B7, + Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, + Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; + + ArrayRef BR2Regs(Xtensa::BR2RegClass.begin(), + Xtensa::BR2RegClass.end()); + + ArrayRef BR4Regs(Xtensa::BR4RegClass.begin(), + Xtensa::BR4RegClass.end()); if (ArgFlags.isByVal()) { Align ByValAlign = ArgFlags.getNonZeroByValAlign(); @@ -824,6 +1029,17 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, Reg = State.AllocateReg(IntRegs); State.AllocateReg(IntRegs); LocVT = MVT::i32; + } else if (ValVT == MVT::v1i1) { + Reg = State.AllocateReg(BoolRegs); + } else if (ValVT == MVT::v2i1) { + Reg = State.AllocateReg(BR2Regs); + LocVT = ValVT; + } else if (ValVT == MVT::v4i1) { + Reg = State.AllocateReg(BR4Regs); + LocVT = ValVT; + } else if (isVecVT(ValVT)) { + Reg = State.AllocateReg(VecRegs); + LocVT = ValVT; } else llvm_unreachable("Cannot handle this ValVT."); @@ -918,6 +1134,14 @@ SDValue XtensaTargetLowering::LowerFormalArguments( if (RegVT == MVT::i32) { RC = &Xtensa::ARRegClass; + } else if (RegVT == MVT::v1i1) { + RC = &Xtensa::BRRegClass; + } else if (RegVT == MVT::v2i1) { + RC = &Xtensa::BR2RegClass; + } else if (RegVT == MVT::v4i1) { + RC = &Xtensa::BR4RegClass; + } else if (isVecVT(RegVT)) { + RC = &Xtensa::AE_DRRegClass; } else llvm_unreachable("RegVT not supported by FormalArguments Lowering"); @@ -1356,6 +1580,8 @@ SDValue 
XtensaTargetLowering::LowerSELECT_CC(SDValue Op, else if (TrueV.getValueType() == MVT::f32) return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, RHS, TrueV, FalseV, TargetCC); + else if (TrueV.getValueType().isVector()) + return Op; else return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueV, FalseV, TargetCC); @@ -1385,7 +1611,7 @@ SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { // SETCC has BRCOND predecessor, return original operation if (Val) - return Op; + return SDValue(); // Expand to target SELECT_CC SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType()); @@ -1397,6 +1623,8 @@ SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { else if (TrueV.getValueType() == MVT::f32) return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, RHS, TrueV, FalseV, TargetCC); + else if (TrueV.getValueType().isVector()) + return SDValue(); else return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueV, FalseV, TargetCC); @@ -1776,7 +2004,7 @@ SDValue XtensaTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { unsigned VAListSize = 12; return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), DAG.getConstant(VAListSize, SDLoc(Op), MVT::i32), - Align(8), false, true, false, MachinePointerInfo(), + Align(4), false, true, false, MachinePointerInfo(), MachinePointerInfo()); } @@ -1886,6 +2114,14 @@ SDValue XtensaTargetLowering::LowerATOMIC_FENCE(SDValue Op, return DAG.getNode(XtensaISD::MEMW, DL, MVT::Other, Chain); } +SDValue XtensaTargetLowering::LowerBitVecLOAD(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT VT = Op.getValueType(); + assert(VT.isVector() && VT.getSizeInBits() <= 8); + return SDValue(); // Expand +} + SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { @@ -1936,6 +2172,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, case ISD::FSHL: case ISD::FSHR: return LowerFunnelShift(Op, DAG); + case ISD::BITCAST: + return LowerBITCAST(Op, DAG); default: llvm_unreachable("Unexpected node to lower"); } @@ -1980,6 +2218,7 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(SRC); OPCODE(SSL); OPCODE(SSR); + OPCODE(BUILD_VEC); } return NULL; #undef OPCODE @@ -3397,14 +3636,59 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( case Xtensa::LSIP: case Xtensa::LSX: case Xtensa::LSXP: { - const MachineMemOperand &MMO = **MI.memoperands_begin(); - if (MMO.isVolatile()) { - BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + if (MI.memoperands().size() > 0) { + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } } return MBB; } + case Xtensa::MOVBA_P: + case Xtensa::MOVBA2_P: { + const TargetRegisterClass *AR = getRegClassFor(MVT::i32); + + Register Dst1 = MRI.createVirtualRegister(AR); + Register Dst2 = MRI.createVirtualRegister(AR); + MachineOperand Breg = MI.getOperand(0); + MachineOperand Src = MI.getOperand(1); + + /* + MOVBA_P2 Breg, Dst1, Dest2, Src + */ + unsigned TargetOpcode; + switch (MI.getOpcode()) { + case Xtensa::MOVBA_P: + TargetOpcode = Xtensa::MOVBA_P2; + break; + case Xtensa::MOVBA2_P: + TargetOpcode = Xtensa::MOVBA2_P2; + break; + case Xtensa::MOVBA4_P: + TargetOpcode = Xtensa::MOVBA4_P2; + break; + default: + llvm_unreachable("Unknown opcode"); + } + BuildMI(*MBB, MI, DL, TII.get(TargetOpcode), Breg.getReg()) + 
.addReg(Dst1, RegState::Define | RegState::EarlyClobber) + .addReg(Dst2, RegState::Define | RegState::EarlyClobber) + .addReg(Src.getReg()); + + MI.eraseFromParent(); + + return MBB; + } default: return EmitDSPInstrWithCustomInserter(MI, MBB, TII, MF, MRI, DL); // llvm_unreachable("Unexpected instr type to insert"); } } + +SDValue XtensaTargetLowering::LowerBITCAST(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getValueType().isVector()); + if (Op.getOperand(0).getValueType() == MVT::v8i8) + return SDValue(); // Expand + return Op; // Legal +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 3f170d57e16b9..eea9a8ba56d4f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -91,7 +91,10 @@ enum { SRL, SRC, SSL, - SSR + SSR, + + BUILD_VEC, + TRUNC, }; } @@ -178,6 +181,8 @@ class XtensaTargetLowering : public TargetLowering { const SmallVectorImpl &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override; + SDValue LowerVectorShift(SDValue Op, SelectionDAG &DAG) const; + bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; } @@ -227,6 +232,9 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBitVecLOAD(SDValue Op, SelectionDAG &DAG) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 04d3d2d3c0411..a84aba944d686 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -105,6 +105,23 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (STI.hasSingleFloat() && Xtensa::ARRegClass.contains(SrcReg) && Xtensa::FPRRegClass.contains(DestReg)) Opcode = Xtensa::WFR; + else if (STI.hasBoolean() && Xtensa::BRRegClass.contains(SrcReg) && + Xtensa::BRRegClass.contains(DestReg)) { + BuildMI(MBB, MBBI, DL, get(Xtensa::ORB), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(DestReg, SrcReg)) + Opcode = Xtensa::AE_MOV; + else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(DestReg) && + Xtensa::ARRegClass.contains(SrcReg)) + Opcode = Xtensa::AE_MOVDA32; + else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(SrcReg) && + Xtensa::ARRegClass.contains(DestReg)) + Opcode = Xtensa::AE_MOVAD32_L; + else if (STI.hasHIFI3() && + Xtensa::AE_VALIGNRegClass.contains(DestReg, SrcReg)) + Opcode = Xtensa::AE_MOVALIGN; else llvm_unreachable("Impossible reg-to-reg copy"); @@ -148,27 +165,35 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, } else if (RC == &Xtensa::FPRRegClass) { LoadOpcode = Xtensa::LSI; StoreOpcode = Xtensa::SSI; + } else if (RC == &Xtensa::BRRegClass) { + LoadOpcode = Xtensa::RESTORE_BOOL; + StoreOpcode = Xtensa::SPILL_BOOL; + } else if (RC == &Xtensa::AE_DRRegClass) { + LoadOpcode = Xtensa::AE_L64_I; + StoreOpcode = Xtensa::AE_S64_I; + } else if (RC == &Xtensa::AE_VALIGNRegClass) { + LoadOpcode = Xtensa::AE_LALIGN64_I; + StoreOpcode = Xtensa::AE_SALIGN64_I; } else llvm_unreachable("Unsupported regclass to load or store"); } -void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, 
+MachineInstrBuilder +XtensaInstrInfo::buildLoadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, - unsigned *Reg, int64_t Value) const { + unsigned Reg, int64_t Value) const { DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); - MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); - const TargetRegisterClass *RC = &Xtensa::ARRegClass; - // create virtual reg to store immediate - *Reg = RegInfo.createVirtualRegister(RC); if (Value >= -2048 && Value <= 2047) { - BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Value); + return BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), Reg).addImm(Value); } else if (Value >= -32768 && Value <= 32767) { int Low = Value & 0xFF; int High = Value & ~0xFF; - BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Low); - BuildMI(MBB, MBBI, DL, get(Xtensa::ADDMI), *Reg).addReg(*Reg).addImm(High); + BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), Reg).addImm(Low); + return BuildMI(MBB, MBBI, DL, get(Xtensa::ADDMI), Reg) + .addReg(Reg) + .addImm(High); } else if (Value >= -4294967296LL && Value <= 4294967295LL) { // 32 bit arbirary constant MachineConstantPool *MCP = MBB.getParent()->getConstantPool(); @@ -178,7 +203,8 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, false); unsigned Idx = MCP->getConstantPoolIndex(CVal, Align(2U)); // MCSymbol MSym - BuildMI(MBB, MBBI, DL, get(Xtensa::L32R), *Reg).addConstantPoolIndex(Idx); + return BuildMI(MBB, MBBI, DL, get(Xtensa::L32R), Reg) + .addConstantPoolIndex(Idx); } else { // use L32R to let assembler load immediate best // TODO replace to L32R @@ -186,6 +212,18 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, } } +void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned *Reg, int64_t Value) const { + DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + + // create virtual reg to store immediate + *Reg = RegInfo.createVirtualRegister(RC); + buildLoadImmediate(MBB, MBBI, *Reg, Value); +} + unsigned XtensaInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { switch (MI.getOpcode()) { case TargetOpcode::INLINEASM: { // Inline Asm: Variable size. diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h index f2d50c3abafe4..b3925ec03de58 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -65,6 +65,10 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { // physical register Reg. 
void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned *Reg, int64_t Value) const; + + MachineInstrBuilder buildLoadImmediate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned Reg, int64_t Value) const; bool reverseBranchCondition(SmallVectorImpl &Cond) const override; MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 51b0dd9a0cba5..a89a0a55f07f6 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -29,6 +29,11 @@ class ArithLogic_RRR oper2, bits<4> oper1, string instrAsm, let isReMaterializable = 0; } +class Arith_Pat + : Pat<(res_vt (opNode op_vt:$r, op_vt:$t)), + (inst op_vt:$r, op_vt:$t)>; + def ADD : ArithLogic_RRR<0x08, 0x00, "add", add, 1>; def SUB : ArithLogic_RRR<0x0C, 0x00, "sub", sub>; def AND : ArithLogic_RRR<0x01, 0x00, "and", and, 1>; @@ -646,6 +651,20 @@ def RUR : RRR_Inst<0x00, 0x03, 0x0E, (outs AR:$r), (ins UR:$ur), let t = ur{3-0}; } +def WUR_FCR :RRR_Inst<0x00, 0x03, 0x0F, (outs), (ins AR:$t), + "wur.fcr \t$t", []> { + let r = 232{7-4}; + let s = 232{3-0}; + let Defs = [FCR]; +} + +def WUR_FSR :RRR_Inst<0x00, 0x03, 0x0F, (outs), (ins AR:$t), + "wur.fsr\t $t", []> { + let r = 233{7-4}; + let s = 233{3-0}; + let Defs = [FSR]; +} + //===----------------------------------------------------------------------===// // External Registers read/write instructions //===----------------------------------------------------------------------===// @@ -892,6 +911,11 @@ def ROTW : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins imm8n_7:$imm), //===----------------------------------------------------------------------===// // Boolean Instructions //===----------------------------------------------------------------------===// +class BIN_PAT + : Pat<(dst_vt (node src_vt:$f1, src_vt:$f2)), + (inst src_vt:$f1, src_vt:$f2)>; + def ALL4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), "all4\t$t, $s", []>, Requires<[HasBoolean]> { @@ -907,6 +931,12 @@ def ANDB : RRR_Inst<0x00, 0x02, 0x00, (outs BR:$r), (ins BR:$s, BR:$t), "andb\t$r, $s, $t", []>, Requires<[HasBoolean]>; def ANDBC : RRR_Inst<0x00, 0x02, 0x01, (outs BR:$r), (ins BR:$s, BR:$t), "andbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ORB : RRR_Inst<0x00, 0x02, 0x02, (outs BR:$r), (ins BR:$s, BR:$t), + "orb\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), + "orbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), + "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; def ANY4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), "any4\t$t, $s", []>, Requires<[HasBoolean]> { @@ -942,21 +972,102 @@ let isBranch = 1, isTerminator = 1, Predicates = [HasBoolean] in { } } -def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$r), (ins AR:$s, BR:$t), +let Constraints = "$dr = $r,@earlyclobber $dr" in { + def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$dr), (ins AR:$r, AR:$s, BR:$t), "movf\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def MOVT : RRR_Inst<0x00, 0x03, 0x0D, (outs AR:$r), (ins AR:$s, BR:$t), - "movt\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def ORB : RRR_Inst<0x00, 0x02, 0x02, (outs BR:$r), (ins BR:$s, BR:$t), - "orb\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), - "orbc\t$r, $s, $t", []>, 
Requires<[HasBoolean]>; -def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), - "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; + def MOVT : RRR_Inst<0x00, 0x03, 0x0D, (outs AR:$dr), (ins AR:$r, AR:$s, BR:$t), + "movt\t$r, $s, $t", []>, Requires<[HasBoolean]>; +} def : Pat<(Xtensa_br_t BR:$b, bb:$target), (BT BR:$b, bb:$target)>; def : Pat<(Xtensa_br_f BR:$b, bb:$target), (BF BR:$b, bb:$target)>; +let Predicates = [HasBoolean] in { + + def OR_BR_PAT: BIN_PAT; + def XOR_BR_PAT: BIN_PAT; + def AND_BR_PAT: BIN_PAT; + + // vselect C T F = C * T + ~C * F + def : Pat<(v1i1 (vselect v1i1:$c, v1i1:$t, v1i1:$f)), + (ORB (ANDB $t, $f), (ANDBC $f, $c))>; + + + def MOVBA_P2: Pseudo<(outs BR:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA_P: Pseudo<(outs BR:$r), (ins AR:$s), + "!movba $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; + } + + def MOVBA2_P2: Pseudo<(outs BR2:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA2_P: Pseudo<(outs BR2:$r), (ins AR:$s), + "!movba2 $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; + } + + def MOVBA4_P2: Pseudo<(outs BR4:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba4 $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA4_P: Pseudo<(outs BR4:$r), (ins AR:$s), + "!movba4 $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; + } + + def EXTUI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), + "!extui_br $r, $s, $b", []>; + def EXTUI_BR2_P: Pseudo<(outs AR:$r), (ins AR:$s, BR2:$b), + "!extui_br2 $r, $s, $b", []>; + def EXTUI_BR4_P: Pseudo<(outs AR:$r), (ins AR:$s, BR4:$b), + "!extui_br4 $r, $s, $b", []>; + def SLLI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), + "!slli_br $r, $s, $b", []>; + + def : Pat<(v1i1 (build_vector AR:$a)), (MOVBA_P AR:$a)>; + def : Pat<(v1i1 (scalar_to_vector AR:$a)), (MOVBA_P AR:$a)>; + + def : Pat<(v2i1 (build_vector AR:$a, AR:$b)), + (MOVBA2_P (OR AR:$a, (SLLI AR:$b, (i32 1))))>; + + def : Pat<(v2i1 (Xtensa_trunc AR:$s)), (MOVBA2_P AR:$s)>; + def : Pat<(v4i1 (Xtensa_trunc AR:$s)), (MOVBA4_P AR:$s)>; + + def : Pat<(i32 (vector_extract (v1i1 BR:$b), (i32 0))), + (EXTUI_BR_P (RSR BREG), BR:$b)>; + def : Pat<(v1i1 (load addr_ish1:$addr)), (MOVBA_P (L8UI mem8:$addr))>; + def : Pat<(v2i1 (load addr_ish1:$addr)), (MOVBA2_P (L8UI mem8:$addr))>; + def : Pat<(v4i1 (load addr_ish1:$addr)), (MOVBA4_P (L8UI mem8:$addr))>; + + def : Pat<(store BR:$b, addr_ish1:$addr), (S8I (EXTUI_BR_P (RSR BREG), BR:$b), mem32:$addr)>; + def : Pat<(store BR2:$b, addr_ish1:$addr), (S8I (EXTUI_BR2_P (RSR BREG), BR2:$b), mem32:$addr)>; + def : Pat<(store BR4:$b, addr_ish1:$addr), (S8I (EXTUI_BR4_P (RSR BREG), BR4:$b), mem32:$addr)>; + + def SPILL_BOOL: Pseudo<(outs), (ins BR:$b, mem8:$mem), + "!spill_bool $b, $mem",[]> { + let mayStore = 1; + } + + def RESTORE_BOOL: Pseudo<(outs BR:$out), (ins mem8:$mem), + "!restore_bool $out, $mem",[]> { + let mayLoad = 1; + let Defs = [BREG]; + } +} + + //===----------------------------------------------------------------------===// // Floating-Point Instructions //===----------------------------------------------------------------------===// @@ -975,6 +1086,12 @@ def ADD_S : FPArith_RRR<0x00, 0x0A, "add.s", fadd, 1>; def SUB_S : FPArith_RRR<0x01, 0x0A, "sub.s", fsub>; def MUL_S : FPArith_RRR<0x02, 0x0A, "mul.s", fmul, 1>; +let Predicates = [HasSingleFloat] in { + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; +} + // FP load instructions let mayLoad = 1, 
usesCustomInserter = 1, Predicates = [HasSingleFloat] in { def LSI : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), @@ -986,20 +1103,22 @@ let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { let s{3-0} = addr{3-0}; } - def LSIP : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), - "lsip\t$t, $addr", []> { - bits<12> addr; + def LSIP : RRI8_Inst<0x03, (outs FPR:$t, AR:$ds), (ins AR:$s, offset8m32:$off), + "lsip\t$t, $s, $off", []> { + bits<10> off; let r = 0x08; - let imm8{7-0} = addr{11-4}; - let s{3-0} = addr{3-0}; + let imm8{7-0} = off{9-2}; + let Constraints = "$ds = $s,@earlyclobber $ds"; } - def LSX : RRR_Inst<0x00, 0x08, 0x00, (outs), (ins FPR:$r, AR:$s, AR:$t), + def LSX : RRR_Inst<0x00, 0x08, 0x00, (outs FPR:$r), (ins AR:$s, AR:$t), "lsx\t$r, $s, $t", []>; - def LSXP : RRR_Inst<0x00, 0x08, 0x01, (outs), (ins FPR:$r, AR:$s, AR:$t), - "lsxp\t$r, $s, $t", []>; + def LSXP : RRR_Inst<0x00, 0x08, 0x01, (outs FPR:$r, AR:$ds), (ins AR:$s, AR:$t), + "lsxp\t$r, $s, $t", []> { + let Constraints = "$ds = $s,@earlyclobber $ds"; + } } def : Pat<(f32 (load addr_ish4:$addr)), (f32 (LSI mem32:$addr))>; @@ -1015,20 +1134,22 @@ let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { let s{3-0} = addr{3-0}; } - def SSIP : RRI8_Inst<0x03, (outs), (ins FPR:$t, mem32:$addr), - "ssip\t$t, $addr", []> { - bits<12> addr; + def SSIP : RRI8_Inst<0x03, (outs AR:$ds), (ins FPR:$t, AR:$s, offset8m32:$off), + "ssip\t$t, $s, $off", []> { + bits<10> off; let r = 0x0C; - let imm8{7-0} = addr{11-4}; - let s{3-0} = addr{3-0}; + let imm8{7-0} = off{9-2}; + let Constraints = "$ds = $s,@earlyclobber $ds"; } def SSX: RRR_Inst<0x00, 0x08, 0x04, (outs), (ins FPR:$r, AR:$s, AR:$t), "ssx\t$r, $s, $t", []>; - def SSXP: RRR_Inst<0x00, 0x08, 0x05, (outs), (ins FPR:$r, AR:$s, AR:$t), - "ssxp\t$r, $s, $t", []>; + def SSXP: RRR_Inst<0x00, 0x08, 0x05, (outs AR:$ds), (ins FPR:$r, AR:$s, AR:$t), + "ssxp\t$r, $s, $t", []> { + let Constraints = "$ds = $s,@earlyclobber $ds"; + } } def : Pat<(store FPR:$t, addr_ish4:$addr), (SSI FPR:$t, mem32:$addr)>; @@ -1055,6 +1176,16 @@ def ULT_S : FCompare<0x05, 0x0b, "ult.s", Xtensa_cmpult, 0>; def ULE_S : FCompare<0x07, 0x0b, "ule.s", Xtensa_cmpule, 0>; def UN_S : FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; +let Predicates = [HasSingleFloat] in { + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; +} + def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "abs.s\t$r, $s", [(set FPR:$r, (fabs FPR:$s))]>, Requires<[HasSingleFloat]> { @@ -1063,15 +1194,22 @@ def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), def : Pat<(fabs FPR:$s), (ABS_S $s)>; -def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +def : Pat<(f32 (int_xtensa_xt_abs_s FPR:$frs)), + (ABS_S FPR:$frs)>, + Requires<[HasSingleFloat]>; + +let Constraints = "$dr = $r,@earlyclobber $dr" in { + +def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0E; } -def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0F; } +} def CEIL_S : RRR_Inst<0x00, 0x0A, 0x0B, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "ceil.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { @@ -1093,8 +1231,10 @@ def DIV0_S : RRR_Inst<0x00, 0x0A, 0x0F, 
(outs FPR:$r), (ins FPR:$s), let t = 0x7; } -def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), - "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; +def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$dr), (ins FPR:$r, FPR:$s, FPR:$t), + "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> { +let Constraints = "$dr = $r,@earlyclobber $dr"; +} def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), "float.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { @@ -1105,6 +1245,10 @@ def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), def : Pat<(f32 (sint_to_fp AR:$s)), (FLOAT_S AR:$s, 0)>; +def : Pat<(f32 (int_xtensa_xt_float_s i32:$s, timm:$imm)), + (FLOAT_S AR:$s, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def FLOOR_S : RRR_Inst<0x00, 0x0A, 0x0A, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "floor.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1132,9 +1276,10 @@ def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR: def : Pat<(fma FPR:$r1, FPR:$r2, FPR:$r3), (MADD_S $r3, $r1, $r2)>; -def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0D; + let Constraints = "$dr = $r,@earlyclobber $dr"; } def MKSADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), @@ -1149,23 +1294,30 @@ def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), let t = 0x00; } -def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$r), (ins FPR:$s, AR:$t), - "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; +let Constraints = "$dr = $r,@earlyclobber $dr" in { -def MOVF_S : RRR_Inst<0x00, 0x0B, 0x0C, (outs FPR:$r), (ins FPR:$s, BR:$t), - "movf.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; + def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVGEZ_S : RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVGEZ_S : RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movltz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movltz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$r), (ins FPR:$s, BR:$t), - "movt.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; + def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$dr), (ins FPR:$r, FPR:$s, BR:$t), + "movt.s\t$r, $s, $t", + [(set FPR:$dr, (int_xtensa_xt_movt_s FPR:$r, FPR:$s, BR:$t))]>, + Requires<[HasBoolean, HasSingleFloat]>; + + def MOVF_S : RRR_Inst<0x00, 0x0B, 0x0C, (outs FPR:$dr), (ins FPR:$r, FPR:$s, BR:$t), + "movf.s\t$r, $s, $t", + [(set FPR:$dr, (int_xtensa_xt_movf_s FPR:$r, FPR:$s, BR:$t))]>, + Requires<[HasBoolean, HasSingleFloat]>; +} // FP multipy-sub def MSUB_S : 
RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), @@ -1228,6 +1380,10 @@ def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s, uimm4:$imm), def : Pat<(i32 (any_fp_to_sint FPR:$s)), (TRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (int_xtensa_xt_trunc_s f32:$frs, timm:$imm)), + (TRUNC_S FPR:$frs, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm), "ufloat.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1237,6 +1393,10 @@ def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm) def : Pat<(f32 (uint_to_fp AR:$s)), (UFLOAT_S AR:$s, 0)>; +def : Pat<(f32 (int_xtensa_xt_ufloat_s i32:$s, timm:$imm)), + (UFLOAT_S AR:$s, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "utrunc.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1246,6 +1406,10 @@ def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm) def : Pat<(i32 (any_fp_to_uint FPR:$s)), (UTRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (int_xtensa_xt_utrunc_s f32:$frs, timm:$imm)), + (UTRUNC_S FPR:$frs, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), "wfr\t$r, $s", [(set FPR:$r, (bitconvert AR:$s))]>, Requires<[HasSingleFloat]> { @@ -1764,8 +1928,123 @@ let Predicates = [HasESP32S3Ops] in { } include "XtensaS3DSPInstrInfo.td" + +//===----------------------------------------------------------------------===// +// FP intrinsic patterns +//===----------------------------------------------------------------------===// +let Predicates = [HasSingleFloat] in { + + def ADDEXP_S_PAT :Pat<(f32 (int_xtensa_xt_addexp_s FPR:$frr, FPR:$frs)), + (ADDEXP_S FPR:$frr, FPR:$frs)>; + + def ADDEXPM_S_PAT :Pat<(f32 (int_xtensa_xt_addexpm_s FPR:$frr, FPR:$frs)), + (ADDEXPM_S FPR:$frr, FPR:$frs)>; + + def CEIL_S_PAT :Pat<(i32 (int_xtensa_xt_ceil_s FPR:$frs, timm:$imm_t)), + (CEIL_S FPR:$frs, timm:$imm_t)>; + + def DIV0_S_PAT :Pat<(f32 (int_xtensa_xt_div0_s FPR:$frs)), + (DIV0_S FPR:$frs)>; + + def DIVN_S_PAT :Pat<(f32 (int_xtensa_xt_divn_s FPR:$frr, FPR:$frs, FPR:$frt)), + (DIVN_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def FLOOR_S_PAT :Pat<(i32 (int_xtensa_xt_floor_s FPR:$frs, timm:$imm_t)), + (FLOOR_S FPR:$frs, timm:$imm_t)>; + + def LSI_PAT :Pat<(f32 (int_xtensa_xt_lsi AR:$ars, timm:$imm8x4)), + (LSI AR:$ars, timm:$imm8x4)>; + + def LSX_PAT :Pat<(f32 (int_xtensa_xt_lsx AR:$ars, AR:$art)), + (LSX AR:$ars, AR:$art)>; + + def MADD_S_PAT :Pat<(f32 (int_xtensa_xt_madd_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MADD_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def MADDN_S_PAT :Pat<(f32 (int_xtensa_xt_maddn_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MADDN_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def MKDADJ_S_PAT :Pat<(f32 (int_xtensa_xt_mkdadj_s FPR:$frr, FPR:$frs)), + (MKDADJ_S FPR:$frr, FPR:$frs)>; + + def MKSADJ_S_PAT :Pat<(f32 (int_xtensa_xt_mksadj_s FPR:$frs)), + (MKSADJ_S FPR:$frs)>; + + def MOV_S_PAT :Pat<(f32 (int_xtensa_xt_mov_s FPR:$frs)), + (MOV_S FPR:$frs)>; + + def MOVEQZ_S_PAT :Pat<(f32 (int_xtensa_xt_moveqz_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVEQZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVGEZ_S_PAT :Pat<(f32 (int_xtensa_xt_movgez_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVGEZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVLTZ_S_PAT :Pat<(f32 (int_xtensa_xt_movltz_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVLTZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVNEZ_S_PAT :Pat<(f32 
(int_xtensa_xt_movnez_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVNEZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MSUB_S_PAT :Pat<(f32 (int_xtensa_xt_msub_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MSUB_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def NEG_S_PAT :Pat<(f32 (int_xtensa_xt_neg_s FPR:$frs)), + (NEG_S FPR:$frs)>; + + def NEXP01_S_PAT :Pat<(f32 (int_xtensa_xt_nexp01_s FPR:$frs)), + (NEXP01_S FPR:$frs)>; + + def RECIP0_S_PAT :Pat<(f32 (int_xtensa_xt_recip0_s FPR:$frs)), + (RECIP0_S FPR:$frs)>; + + def RFR_PAT :Pat<(i32 (int_xtensa_xt_rfr FPR:$frs)), + (RFR FPR:$frs)>; + + def ROUND_S_PAT :Pat<(i32 (int_xtensa_xt_round_s FPR:$frs, timm:$imm_t)), + (ROUND_S FPR:$frs, timm:$imm_t)>; + + def RSQRT0_S_PAT :Pat<(f32 (int_xtensa_xt_rsqrt0_s FPR:$frs)), + (RSQRT0_S FPR:$frs)>; + + def RUR_FCR_PAT :Pat<(i32 (int_xtensa_xt_rur_fcr )), + (RUR FCR)>; + + def RUR_FSR_PAT :Pat<(i32 (int_xtensa_xt_rur_fsr )), + (RUR FSR )>; + + def SQRT0_S_PAT :Pat<(f32 (int_xtensa_xt_sqrt0_s FPR:$frs)), + (SQRT0_S FPR:$frs)>; + + def SSI_PAT :Pat<(int_xtensa_xt_ssi FPR:$frt, AR:$ars, timm:$imm8x4), + (SSI FPR:$frt, AR:$ars, timm:$imm8x4)>; + + def SSIP_PAT :Pat<(i32 (int_xtensa_xt_ssip FPR:$frt, AR:$ars, timm:$imm8x4)), + (SSIP FPR:$frt, AR:$ars, timm:$imm8x4)>; + + def SSX_PAT :Pat<(int_xtensa_xt_ssx FPR:$frr, AR:$ars, AR:$art), + (SSX FPR:$frr, AR:$ars, AR:$art)>; + + def SSXP_PAT :Pat<(i32 (int_xtensa_xt_ssxp FPR:$frr, AR:$ars, AR:$art)), + (SSXP FPR:$frr, AR:$ars, AR:$art)>; + + def WFR_PAT :Pat<(f32 (int_xtensa_xt_wfr AR:$ars)), + (WFR AR:$ars)>; + + def WUR_FCR_PAT :Pat<(int_xtensa_xt_wur_fcr AR:$art), + (WUR_FCR AR:$art)>; + + def WUR_FSR_PAT :Pat<(int_xtensa_xt_wur_fsr AR:$art), + (WUR_FSR AR:$art)>; + +} //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// include "XtensaDSPInstrInfo.td" +//===----------------------------------------------------------------------===// +// HiFi3 Instructions +//===----------------------------------------------------------------------===// +include "XtensaHIFIInstrFormats.td" +include "XtensaHIFIInstrInfo.td" +include "XtensaHIFIInstrPatterns.td" diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 6a9bf514be8d3..3d10410a77599 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -21,6 +21,26 @@ class Immediate let ParserMatchClass = !cast(asmop); } +class ImmediateRanged : + Immediate= " # Low # " && Imm <= " # High # " && (Imm % " # Step # " ) == 0;", + asmop> { + let PrintMethod = "printImmOperand<" # Low #"," # High # "," # Step # ">"; + } + +class ImmRangedAsmOperand : + ImmAsmOperand { + let PredicateMethod = "isImmInRange<" # Low #"," # High # "," # Step # ">"; +} + +multiclass ImmRangeDecl { + + def _AsmOperand : ImmRangedAsmOperand; + def NAME : ImmediateRanged; + +} + + // imm8 predicate - Immediate in the range [-128,127] def Imm8_AsmOperand : ImmAsmOperand<"Imm8">; def imm8 : Immediate= -128 && Imm <= 127; }], "Imm8_AsmOperand"> { @@ -107,6 +127,14 @@ def shimm1_31 : Immediate= 1 && Imm <= 31; }], "Shimm1_31_A let DecoderMethod = "decodeShimm1_31Operand"; } +defm imm32n_28: ImmRangeDecl<-32, 28, 4>; +defm imm64n_56: ImmRangeDecl<-64, 56, 8>; +defm imm0_56: ImmRangeDecl<0, 56, 8>; +defm imm16n_14: ImmRangeDecl<-16, 14, 2>; +defm imm16n_47: ImmRangeDecl<-16, 47, 1>; +defm uimm2: ImmRangeDecl<0, 3, 1>; +defm uimm6: ImmRangeDecl<0, 63, 1>; + // Memory offset 
0..255 for 8-bit memory accesses def Offset8m8_AsmOperand : ImmAsmOperand<"Offset8m8">; def offset8m8 : Immediate; def offset8m32 : Immediate= 0 && Imm <= 1020 && (Imm & 0x3 == 0); }], - "Offset8m32_AsmOperand">; + "Offset8m32_AsmOperand"> { + let EncoderMethod = "getOffset8m32OpValue"; + let DecoderMethod = "decodeOffset8m32Operand"; +} // Memory offset 0..60 for 32-bit memory accesses def Offset4m32_AsmOperand : ImmAsmOperand<"Offset4m32">; diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index cdc29be5deb3b..37a92331fec3b 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -25,9 +25,9 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, SDTCisVT<5, i32>]>; def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; -def SDT_XtensaBrBool : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBrBool : SDTypeProfile<0, 2, [SDTCisVT<0, v1i1>, SDTCisVT<1, OtherVT>]>; def SDT_XtensaBrCCFP : SDTypeProfile<0, 4, [SDTCisVT<0, i32>, SDTCisVT<1, f32>, SDTCisVT<2, f32>, SDTCisVT<3, OtherVT>]>; -def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; +def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, v1i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; @@ -48,6 +48,9 @@ def SDT_XtensaLoopEnd : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>; def SDT_XtensaLoopDec : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; def SDT_XtensaLoopBr : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBuildVec : SDTypeProfile<1, 1, [SDTCisVT<0, v1i64>, SDTCisVT<1, i32>]>; +def SDT_XtensaTRUNC : SDTypeProfile<1, 1, [SDTCisVT<1, i32>, SDTCisVec<0>]>; + //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -102,6 +105,7 @@ def Xtensa_srl: SDNode<"XtensaISD::SRL", SDT_XtensaSRL, [SDNPInGlue]>; def Xtensa_src: SDNode<"XtensaISD::SRC", SDT_XtensaSRC, [SDNPInGlue]>; def Xtensa_ssl: SDNode<"XtensaISD::SSL", SDT_XtensaSSL, [SDNPOutGlue]>; def Xtensa_ssr: SDNode<"XtensaISD::SSR", SDT_XtensaSSR, [SDNPOutGlue]>; +def Xtensa_build_vec: SDNode<"XtensaISD::BUILD_VEC", SDT_XtensaBuildVec, [SDNPInGlue]>; def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>; def Xtensa_callw: SDNode<"XtensaISD::CALLW", SDT_XtensaCall, @@ -118,3 +122,4 @@ def Xtensa_loopdec: SDNode<"XtensaISD::LOOPDEC", SDT_XtensaLoopDec, [SDNPHasChain, SDNPInGlue]>; def Xtensa_loopbr: SDNode<"XtensaISD::LOOPBR", SDT_XtensaLoopBr, [SDNPHasChain, SDNPInGlue]>; +def Xtensa_trunc: SDNode<"XtensaISD::TRUNC", SDT_XtensaTRUNC>; diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index 6ec817e30ddd3..cea04749849f6 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -18,6 +18,7 @@ #include "XtensaSubtarget.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" #include 
"llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -61,16 +62,30 @@ BitVector XtensaRegisterInfo::getReservedRegs(const MachineFunction &MF) const { // Reserve stack pointer. Reserved.set(Xtensa::SP); + //Reserve QR regs + Reserved.set(Xtensa::Q0); + Reserved.set(Xtensa::Q1); + Reserved.set(Xtensa::Q2); + Reserved.set(Xtensa::Q3); + Reserved.set(Xtensa::Q4); + Reserved.set(Xtensa::Q5); + Reserved.set(Xtensa::Q6); + Reserved.set(Xtensa::Q7); + Reserved.set(Xtensa::BREG); return Reserved; } bool XtensaRegisterInfo::eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, - uint64_t StackSize, - int64_t SPOffset) const { + uint64_t StackSize, int64_t SPOffset, + RegScavenger *RS) const { MachineInstr &MI = *II; MachineFunction &MF = *MI.getParent()->getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineBasicBlock &MBB = *MI.getParent(); + DebugLoc DL = II->getDebugLoc(); + const XtensaInstrInfo &TII = *static_cast( + MBB.getParent()->getSubtarget().getInstrInfo()); const std::vector &CSI = MFI.getCalleeSavedInfo(); int MinCSFI = 0; @@ -116,6 +131,8 @@ bool XtensaRegisterInfo::eliminateFI(MachineBasicBlock::iterator II, case Xtensa::L8I_P: case Xtensa::L8UI: case Xtensa::S8I: + case Xtensa::SPILL_BOOL: + case Xtensa::RESTORE_BOOL: Valid = (Offset >= 0 && Offset <= 255); break; case Xtensa::L16SI: @@ -126,6 +143,46 @@ bool XtensaRegisterInfo::eliminateFI(MachineBasicBlock::iterator II, case Xtensa::LEA_ADD: Valid = (Offset >= -128 && Offset <= 127); break; + case Xtensa::AE_L64_I: + case Xtensa::AE_S64_I: + case Xtensa::AE_S32X2_I: + case Xtensa::AE_L32X2_I: + case Xtensa::AE_S16X4_I: + case Xtensa::AE_L16X4_I: + case Xtensa::AE_LALIGN64_I: + case Xtensa::AE_SALIGN64_I: + Valid = (Offset >= -64 && Offset <= 56); + break; + case Xtensa::AE_S64_IP: + case Xtensa::AE_L64_IP: + case Xtensa::AE_S32X2_IP: + case Xtensa::AE_L32X2_IP: + case Xtensa::AE_S16X4_IP: + case Xtensa::AE_L16X4_IP: + Valid = (Offset >= 0 && Offset <= 56); + break; + case Xtensa::AE_L16X2M_I: + case Xtensa::AE_L16X2M_IU: + case Xtensa::AE_L32F24_I: + case Xtensa::AE_L32F24_IP: + case Xtensa::AE_L32M_I: + case Xtensa::AE_L32M_IU: + case Xtensa::AE_L32_I: + case Xtensa::AE_L32_IP: + case Xtensa::AE_S16X2M_I: + case Xtensa::AE_S16X2M_IU: + case Xtensa::AE_S24RA64S_I: + case Xtensa::AE_S24RA64S_IP: + case Xtensa::AE_S32F24_L_I: + case Xtensa::AE_S32F24_L_IP: + case Xtensa::AE_S32M_I: + case Xtensa::AE_S32M_IU: + case Xtensa::AE_S32RA64S_I: + case Xtensa::AE_S32RA64S_IP: + case Xtensa::AE_S32_L_I: + case Xtensa::AE_S32_L_IP: + Valid = (Offset >= -32 && Offset <= 28); + break; default: // assume that MI is 32-bit load/store operation Valid = (Offset >= 0 && Offset <= 1020) && ((Offset & 0x3) == 0); @@ -152,6 +209,85 @@ bool XtensaRegisterInfo::eliminateFI(MachineBasicBlock::iterator II, IsKill = true; } + unsigned BRegBase = Xtensa::B0; + switch (MI.getOpcode()) { + case Xtensa::SPILL_BOOL: { + Register TempAR = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(TempAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::RSR), TempAR).addReg(Xtensa::BREG); + MachineOperand &Breg = MI.getOperand(0); + unsigned RegNo = Breg.getReg().id() - BRegBase; + + BuildMI(MBB, II, DL, TII.get(Xtensa::EXTUI), TempAR) + .addReg(TempAR) + .addImm(RegNo) + .addImm(1); + + BuildMI(MBB, II, DL, TII.get(Xtensa::S8I)) + .addReg(TempAR, RegState::Kill) + .addReg(FrameReg, getKillRegState(IsKill)) + .addImm(Offset); + + MI.eraseFromParent(); + return true; 
+ } + case Xtensa::RESTORE_BOOL: { + + Register SrcAR = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(SrcAR); + Register MaskAR = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(MaskAR); + Register BRegAR = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(BRegAR); + + MachineOperand &Breg = MI.getOperand(0); + unsigned RegNo = Breg.getReg().id() - BRegBase; + + BuildMI(MBB, II, DL, TII.get(Xtensa::L8UI), SrcAR) + .addReg(FrameReg, getKillRegState(IsKill)) + .addImm(Offset); + + BuildMI(MBB, II, DL, TII.get(Xtensa::EXTUI), SrcAR) + .addReg(SrcAR) + .addImm(0) + .addImm(1); + + if (RegNo != 0) { + BuildMI(MBB, II, DL, TII.get(Xtensa::SLLI), SrcAR) + .addReg(SrcAR) + .addImm(RegNo); + } + + BuildMI(MBB, II, DL, TII.get(Xtensa::RSR), BRegAR).addReg(Xtensa::BREG); + + unsigned Mask = ~(1 << RegNo) & 0x3ff; + BuildMI(MBB, II, DL, TII.get(Xtensa::MOVI), MaskAR) + .addImm(RegNo < 12 ? Mask : 1); + if (RegNo >= 12) { + BuildMI(MBB, II, DL, TII.get(Xtensa::SLLI), MaskAR) + .addReg(MaskAR) + .addImm(RegNo); + } + BuildMI(MBB, II, DL, TII.get(Xtensa::AND), BRegAR) + .addReg(BRegAR) + .addReg(MaskAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::OR), BRegAR) + .addReg(SrcAR) + .addReg(BRegAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::WSR)) + .addReg(Xtensa::BREG, RegState::Define) + .addReg(BRegAR) + .addDef(Breg.getReg(), RegState::Implicit); + + MI.eraseFromParent(); + return true; + } + default: + break; + } MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill); MI.getOperand(OpNo + 1).ChangeToImmediate(Offset); @@ -164,6 +300,8 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, MachineInstr &MI = *II; MachineFunction &MF = *MI.getParent()->getParent(); + assert(RS && "Need register scavenger"); + LLVM_DEBUG(errs() << "\nFunction : " << MF.getName() << "\n"; errs() << "<--------->\n" << MI); @@ -176,7 +314,7 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, << "spOffset : " << spOffset << "\n" << "stackSize : " << stackSize << "\n"); - return eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset); + return eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset, RS); } Register XtensaRegisterInfo::getFrameRegister(const MachineFunction &MF) const { @@ -184,3 +322,9 @@ Register XtensaRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return TFI->hasFP(MF) ? (Subtarget.isWinABI() ? 
Xtensa::A7 : Xtensa::A15) : Xtensa::SP; } + +bool XtensaRegisterInfo::requiresFrameIndexReplacementScavenging( + const MachineFunction &MF) const { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + return MFI.hasStackObjects(); +} \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h index d5403ff35a197..bd251c1fbff4f 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h @@ -43,6 +43,10 @@ struct XtensaRegisterInfo : public XtensaGenRegisterInfo { bool trackLivenessAfterRegAlloc(const MachineFunction &) const override { return true; } + + bool supportsBackwardScavenger() const override { + return true; + } const uint16_t * getCalleeSavedRegs(const MachineFunction *MF = 0) const override; @@ -56,8 +60,10 @@ struct XtensaRegisterInfo : public XtensaGenRegisterInfo { private: bool eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, - int FrameIndex, uint64_t StackSize, - int64_t SPOffset) const; + int FrameIndex, uint64_t StackSize, int64_t SPOffset, + RegScavenger *RS) const; + + bool requiresFrameIndexReplacementScavenging(const MachineFunction &MF) const override; }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index dc2d5abc48758..45a3dfddf53be 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -308,5 +308,71 @@ foreach i = 0-15 in { } // Boolean register class -def BR : RegisterClass<"Xtensa", [i1], 0, (add B0, B1, -B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15)>; +def BR : RegisterClass<"Xtensa", [v1i1], 8, (add B0, B1, +B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15)> { + let Size = 8; +} + +let Namespace = "Xtensa" in { + def bsub0 : SubRegIndex<1>; + def bsub1 : SubRegIndex<1, 1>; + def bsub2 : SubRegIndex<1, 2>; + def bsub3 : SubRegIndex<1, 3>; + + +let SubRegIndices = [bsub0, bsub1] in { + def B0_B1 : RegisterWithSubRegs<"", [B0, B1]>; + def B2_B3 : RegisterWithSubRegs<"", [B2, B3]>; + def B4_B5 : RegisterWithSubRegs<"", [B4, B5]>; + def B6_B7 : RegisterWithSubRegs<"", [B6, B7]>; + def B8_B9 : RegisterWithSubRegs<"", [B8, B9]>; + def B10_B11 : RegisterWithSubRegs<"", [B10, B11]>; + def B12_B13 : RegisterWithSubRegs<"", [B12, B13]>; + def B14_B15 : RegisterWithSubRegs<"", [B14, B15]>; +} + +let SubRegIndices = [bsub0, bsub1, bsub2, bsub3] in { + def B0_B1_B2_B3 : RegisterWithSubRegs<"", [B0, B1, B2, B3]>; + def B4_B5_B6_B7 : RegisterWithSubRegs<"", [B4, B5, B6, B7]>; + def B8_B9_B10_B11 : RegisterWithSubRegs<"", [B8, B9, B10, B11]>; + def B12_B13_B14_B15 : RegisterWithSubRegs<"", [B12, B13, B14, B15]>; +} + +} + +def BR2 : RegisterClass<"Xtensa", [v2i1], 8, (add B0_B1, B2_B3, B4_B5, + B6_B7, B8_B9, B10_B11, + B12_B13, B14_B15)> { + let Size = 8; +} + +def BR4 : RegisterClass<"Xtensa", [v4i1], 8, (add B0_B1_B2_B3, B4_B5_B6_B7, + B8_B9_B10_B11, B12_B13_B14_B15)> { + let Size = 8; +} +//===----------------------------------------------------------------------===// +// HIFI3 vector registers AE_DR +//===----------------------------------------------------------------------===// +class AEDReg num, string n> : XtensaReg { + let HWEncoding{3-0} = num; +} + +foreach i = 0-15 in { + def AED#i : AEDReg, DwarfRegNum<[-1]>; +} + +def AE_DR : RegisterClass<"Xtensa",[v4i16,v2i32,v1i64,v1i32],64, + (sequence "AED%u", 0, 15)>; + 
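+// Note: HiFi3 adds sixteen 64-bit audio-engine data registers (AED0-AED15),
+// modeled by the AE_DR class above and legal for the v4i16, v2i32, v1i64 and
+// v1i32 vector types, plus four vector-alignment registers (U0-U3), modeled
+// by the AE_VALIGN class below with type v8i8 (presumably holding the
+// alignment state consumed by the AE_LALIGN64/AE_SALIGN64 access forms).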
+//===----------------------------------------------------------------------===// +// HIFI3 vector alignment registers AE_VALIGN +//===----------------------------------------------------------------------===// +class AEVALIGNReg num, string n> : XtensaReg { + let HWEncoding{1-0} = num; +} + +foreach i = 0-3 in { + def U#i : AEVALIGNReg, DwarfRegNum<[-1]>; +} + +def AE_VALIGN : RegisterClass<"Xtensa",[v8i8], 64, (sequence "U%u", 0, 3)>; \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td index 557463fd461c4..b384c8f42c426 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td @@ -113,6 +113,8 @@ def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let Inst{10-6} = 0x3; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; + + let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in @@ -140,6 +142,8 @@ def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let Inst{7-6} = 0x0; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; + + let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in diff --git a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp index 301d225c6442e..181a5ae59ee31 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp @@ -39,7 +39,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_andq first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -56,8 +56,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AX.getReg()); MI.eraseFromParent(); @@ -79,7 +79,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addImm(SEL4.getImm()); @@ -110,9 +110,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -144,8 +144,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) diff --git 
a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index e267b71385c0f..e5639ce706c86 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -77,6 +77,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasMiscSR = false; HasESP32S2Ops = false; HasESP32S3Ops = false; + HasHIFI3 = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 03378d8ea032a..e794edec3f5c5 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -134,6 +134,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa esp32-s3 ISA extension bool HasESP32S3Ops; + // Enable Xtensa HIFI3 Extension + bool HasHIFI3; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -217,6 +220,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool useTextSectionLiterals() const; + bool hasHIFI3() const { return HasHIFI3; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 293c3adf43010..7fb90371c9c33 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -34,7 +34,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTarget() { static std::string computeDataLayout(const Triple &TT, StringRef CPU, const TargetOptions &Options, bool IsLittle) { - std::string Ret = "e-m:e-p:32:32-i64:64-i128:128-n32"; + std::string Ret = "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32"; return Ret; } @@ -141,6 +141,7 @@ void XtensaPassConfig::addPreRegAlloc() { void XtensaPassConfig::addPreEmitPass() { addPass(createXtensaPSRAMCacheFixPass()); + addPass(createXtensaBRegFixupPass()); addPass(createXtensaSizeReductionPass()); addPass(createXtensaFixupHwLoops()); addPass(&BranchRelaxationPassID); diff --git a/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll index c4a0749a0ed1e..ee492583bd72d 100644 --- a/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll +++ b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.abs.i32(i32, i1) diff --git a/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll index 10714e6f0cbf3..14a0bb115f438 100644 --- a/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll +++ b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll @@ -1,6 +1,6 @@ -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs --exception-model=dwarf < %s \ ; RUN: | FileCheck -check-prefix=XTENSA-STRUCT16 %s -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs --exception-model=dwarf < %s \ ; RUN: | FileCheck -check-prefix=XTENSA-I128 %s %struct.S = type { [4 x i32] } diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll 
b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll index 8008ba354e6ab..a265b1f79913a 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.ctlz.i32(i32, i1) diff --git a/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll index e6faa89c0ec59..0206120729abe 100644 --- a/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll +++ b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.smin.i32(i32, i32) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll new file mode 100644 index 0000000000000..48d3424a8c62f --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll @@ -0,0 +1,78 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + + + +define float @test_float_s(i32 %a) { + ; CHECK-LABEL: test_float_s + ; CHECK: float.s {{f[0-9]+}}, a2, 1 + %r = call float @llvm.xtensa.xt.float.s(i32 %a, i32 1) + ret float %r +} + +define float @test_ufloat_s(i32 %a) { + ; CHECK-LABEL: test_ufloat_s + ; CHECK: ufloat.s {{f[0-9]+}}, a2, 1 + %r = call float @llvm.xtensa.xt.ufloat.s(i32 %a, i32 1) + ret float %r +} + +define i32 @test_trunc_s(float %a) { + ; CHECK-LABEL: test_trunc_s + ; CHECK: trunc.s a2, {{f[0-9]+}}, 1 + %r = call i32 @llvm.xtensa.xt.trunc.s(float %a, i32 1) + ret i32 %r +} + +define i32 @test_utrunc_s(float %a) { + ; CHECK-LABEL: test_utrunc_s + ; CHECK: trunc.s a2, {{f[0-9]+}}, 1 + %r = call i32 @llvm.xtensa.xt.utrunc.s(float %a, i32 1) + ret i32 %r +} + +define float @test_add_s(float %a, float %b) { + ; CHECK-LABEL: test_add_s + ; CHECK: add.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.add.s(float %a, float %b) + ret float %r +} + +define float @test_sub_s(float %a, float %b) { + ; CHECK-LABEL: test_sub_s + ; CHECK: sub.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.sub.s(float %a, float %b) + ret float %r +} + +define float @test_mul_s(float %a, float %b) { + ; CHECK-LABEL: test_mul_s + ; CHECK: mul.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.mul.s(float %a, float %b) + ret float %r +} + +define float @test_lsxp(ptr %a, i32 %b) { + ; CHECK-LABEL: test_lsxp + ; CHECK: lsxp {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}} + %s = call {float,ptr} @llvm.xtensa.xt.lsxp(ptr %a, i32 %b) + %r = extractvalue {float,ptr} %s , 0 + ret float %r +} + +define float @test_lsip(ptr %a) { + ; CHECK-LABEL: test_lsip + ; CHECK: lsip {{f[0-9]+}}, {{a[0-9]+}}, 4 + %s = call {float,ptr} @llvm.xtensa.xt.lsip(ptr %a, i32 4) + %r = extractvalue {float,ptr} %s , 0 + ret float %r +} + +declare {float,ptr} @llvm.xtensa.xt.lsip(ptr, i32); +declare {float,ptr} @llvm.xtensa.xt.lsxp(ptr, i32); +declare float @llvm.xtensa.xt.float.s(i32, i32); +declare float @llvm.xtensa.xt.ufloat.s(i32, i32); +declare i32 @llvm.xtensa.xt.trunc.s(float , i32); +declare i32 
@llvm.xtensa.xt.utrunc.s(float, i32); +declare float @llvm.xtensa.xt.add.s(float %a, float %b); +declare float @llvm.xtensa.xt.sub.s(float %a, float %b); +declare float @llvm.xtensa.xt.mul.s(float %a, float %b); diff --git a/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll new file mode 100644 index 0000000000000..e3d3ff9b92516 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll @@ -0,0 +1,80 @@ +# RUN: python3 %s > %t && ( llc -O0 -mtriple=xtensa -mcpu=esp32 %t -o - | FileCheck %t ) + +from dataclasses import dataclass + +@dataclass +class F: + ret: str + fun : str + instr: str + args : [str] + +FIXTURES = [ +('float', 'xt_addexp_s', 'addexp.s {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float']) , +('float', 'xt_addexpm_s', 'addexpm.s {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float']) , +('i32', 'xt_ceil_s', 'ceil.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_div0_s', 'div0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_divn_s', 'divn.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('i32', 'xt_floor_s', 'floor.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_lsi', 'lsi {{f[0-9]+}}, {{a[0-9]+}}, 0', ['ptr', 0]) , +# skip xt_lsip , +('float', 'xt_lsx', 'lsx {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['ptr', 'i32']) , +# skip xt_lsxp , +('float', 'xt_madd_s', 'madd.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_maddn_s', 'maddn.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_mkdadj_s', 'mkdadj.s {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float']) , +('float', 'xt_mksadj_s', 'mksadj.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_mov_s', 'mov.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_moveqz_s', 'moveqz.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movgez_s', 'movgez.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movltz_s', 'movltz.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movnez_s', 'movnez.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_msub_s', 'msub.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_neg_s', 'neg.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_nexp01_s', 'nexp01.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_recip0_s', 'recip0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_rfr', 'rfr {{a[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_round_s', 'round.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_rsqrt0_s', 'rsqrt0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_rur_fcr', 'rur {{a[0-9]+}}, fcr', []) , +('i32', 'xt_rur_fsr', 'rur {{a[0-9]+}}, fsr', []) , +('float', 'xt_sqrt0_s', 'sqrt0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('void', 'xt_ssi', 'ssi {{f[0-9]+}}, {{a[0-9]+}}, 0', ['float', 'ptr', 0]) , +('ptr', 'xt_ssip', 'ssip {{f[0-9]+}}, {{a[0-9]+}}, 0', ['float', 'ptr', 0]) , +('void', 'xt_ssx', 'ssx {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['float', 'ptr', 'i32']) , +('ptr', 'xt_ssxp', 'ssxp {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['float', 'ptr', 'i32']) , +('float', 'xt_wfr', 'wfr {{f[0-9]+}}, {{a[0-9]+}}', ['i32']) , +('void', 'xt_wur_fcr', 'wur.fcr {{a[0-9]+}}', ['i32']) , +('void', 'xt_wur_fsr', 'wur.fsr {{a[0-9]+}}', ['i32']) , +] + +template = """ +define {ret} @test_{fun}({fun_args}) {{ + ; 
CHECK-LABEL: {fun} + ; CHECK: {instr} + {ret_var} {assign} call {ret} @llvm.xtensa.{builtin}({call_args}) + ret {ret} {ret_var} +}} +declare {ret} @llvm.xtensa.{builtin}({call_types}); +""" + +for f in FIXTURES: + if isinstance(f, dict): + f = F(**f) + elif isinstance(f, tuple): + f = F(*f) + args = f.args + f.fun_args = ",".join( + ['%s %%a%d' % (a,i) for i,a, in enumerate(args) if isinstance(a,str)]) + f.builtin = f.fun.replace('_','.') + f.call_args = ",".join( + [('%s %%a%d' % (a, i)) if isinstance(a,str) else ('i32 %d' % a) + for i,a, in enumerate(args)]) + f.call_types = ",".join([a if isinstance(a,str) else 'i32' for a in args]) + if f.ret == 'void': + f.assign = "" + f.ret_var = "" + else: + f.assign = "=" + f.ret_var = "%r" + print(template.format(**f.__dict__)) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll new file mode 100644 index 0000000000000..fd1fa8dcf06d1 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll @@ -0,0 +1,91 @@ +# RUN: python3 %s > %t && ( llc -mtriple=xtensa -mcpu=cnl %t -o - | FileCheck %t ) + + +FIXTURES = [ + ('add','ae_add64','<1 x i64>'), + ('add','ae_add32','<1 x i32>'), + ('add','ae_add32','<2 x i32>'), + ('add','ae_add16','<4 x i16>'), + + ('sub','ae_sub64','<1 x i64>'), + ('sub','ae_sub32','<1 x i32>'), + ('sub','ae_sub32','<2 x i32>'), + ('sub','ae_sub16','<4 x i16>'), + + ('mul','ae_mulp32x2','<2 x i32>'), + ('mul','ae_mulp32x2','<1 x i32>'), + ('mul','ae_mul16x4','<4 x i16>'), +] + +REG_TYPES = ['<1 x i64>', '<2 x i32>', '<1 x i32>', '<4 x i16>'] + +BITWISE_OPS = [ + ('and', 'ae_and'), + ('or', 'ae_or'), + ('xor', 'ae_xor') +] + +from dataclasses import dataclass + +@dataclass +class F: + op: str + instr: str + type : str + +template = """ +define {type} @test_{fun}({type} %a, {type} %b) {{ + ; CHECK-LABEL: test_{fun} + ; CHECK: {instr} aed0, {{{{aed[01]}}}}, {{{{aed[01]}}}} + %r = {op} {type} %a, %b + ret {type} %r +}} +""" + +def v2s(typ): + return typ.strip('<>').replace(' ','') + +for f in FIXTURES: + f = F(*f) + f.fun = f.op + v2s(f.type) + print(template.format(**f.__dict__)) + +for f in BITWISE_OPS: + op, instr = f + for typ in REG_TYPES: + fun = op + v2s(typ) + print(template.format(op=op, instr=instr,fun=fun,type=typ)) + +cmp_template = """ +define {vtype} @test_sel_{fun}({ctype} %a, {ctype} %b, {vtype} %t, {vtype} %f) {{ + ; CHECK-LABEL: test_sel_{fun} + ; CHECK-DAG: ae_movda32 {{{{aed[0-9]+}}}}, {{{{a[0-9]+}}}} + ; CHECK-DAG: ae_movda32 {{{{aed[0-9]+}}}}, {{{{a[0-9]+}}}} + ; CHECK: {cmp_inst} {{{{b[0-9]+}}}}, {{{{aed[0-9]+}}}}, {{{{aed[0-9]+}}}} + ; CHECK: {mov_inst} {{{{aed[0-9]+}}}}, {{{{aed[0-9]+}}}}, {{{{b[0-9]+}}}} + %cmp = icmp {cmp_bc} {ctype} %a, %b + %cond = select i1 %cmp, {vtype} %f, {vtype} %t + ret {vtype} %cond +}} +""" + +CMP_FIXTURES = [ + ('eq','ae_eq64', 'ae_movt64'), + ('ne','ae_eq64', 'ae_movf64'), + ('ugt','ae_le64', 'ae_movf64'), + ('uge','ae_lt64', 'ae_movf64'), + ('ult','ae_lt64', 'ae_movt64'), + ('ule','ae_le64', 'ae_movt64'), + ('sgt','ae_le64', 'ae_movf64'), + ('sge','ae_lt64', 'ae_movf64'), + ('slt','ae_lt64', 'ae_movt64'), + ('sle','ae_le64', 'ae_movt64'), +] + +SCALARS = "i32 i16 i8".split() + +for cmp_bc, cmp_inst, mov_inst in CMP_FIXTURES: + for ctype in SCALARS: + for vtype in REG_TYPES: + fun = '_'.join((cmp_bc, ctype, v2s(vtype))) + print(cmp_template.format(**locals())) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.ll new file mode 100644 index 
0000000000000..14925a5e1d472 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.ll @@ -0,0 +1,12298 @@ +; RUN: split-file %s %t +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs16s.ll -o - | FileCheck %t/ae_abs16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs24s.ll -o - | FileCheck %t/ae_abs24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs32.ll -o - | FileCheck %t/ae_abs32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs32s.ll -o - | FileCheck %t/ae_abs32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs64.ll -o - | FileCheck %t/ae_abs64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_abs64s.ll -o - | FileCheck %t/ae_abs64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add16.ll -o - | FileCheck %t/ae_add16.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add16s.ll -o - | FileCheck %t/ae_add16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add24s.ll -o - | FileCheck %t/ae_add24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add32.ll -o - | FileCheck %t/ae_add32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add32_hl_lh.ll -o - | FileCheck %t/ae_add32_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add32s.ll -o - | FileCheck %t/ae_add32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add64.ll -o - | FileCheck %t/ae_add64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_add64s.ll -o - | FileCheck %t/ae_add64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_addbrba32.ll -o - | FileCheck %t/ae_addbrba32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_addsub32.ll -o - | FileCheck %t/ae_addsub32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_addsub32s.ll -o - | FileCheck %t/ae_addsub32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_and.ll -o - | FileCheck %t/ae_and.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvt32x2f16_10.ll -o - | FileCheck %t/ae_cvt32x2f16_10.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvt32x2f16_32.ll -o - | FileCheck %t/ae_cvt32x2f16_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvt48a32.ll -o - | FileCheck %t/ae_cvt48a32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvt64a32.ll -o - | FileCheck %t/ae_cvt64a32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvt64f32_h.ll -o - | FileCheck %t/ae_cvt64f32_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvta32f24s_h.ll -o - | FileCheck %t/ae_cvta32f24s_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvta32f24s_l.ll -o - | FileCheck %t/ae_cvta32f24s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvtq56a32s.ll -o - | FileCheck %t/ae_cvtq56a32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvtq56p32s_h.ll -o - | FileCheck %t/ae_cvtq56p32s_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_cvtq56p32s_l.ll -o - | FileCheck %t/ae_cvtq56p32s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_db.ll -o - | FileCheck %t/ae_db.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_db_ic.ll -o - | FileCheck %t/ae_db_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_db_ip.ll -o - | FileCheck %t/ae_db_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_dbi.ll -o - | FileCheck %t/ae_dbi.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_dbi_ic.ll -o - | FileCheck %t/ae_dbi_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_dbi_ip.ll -o - | FileCheck %t/ae_dbi_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_div64d32_h.ll -o - | FileCheck %t/ae_div64d32_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_div64d32_l.ll -o - | FileCheck %t/ae_div64d32_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_eq64.ll -o - | FileCheck %t/ae_eq64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16_i.ll -o - | FileCheck %t/ae_l16_i.ll 
+; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16_ip.ll -o - | FileCheck %t/ae_l16_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16_x.ll -o - | FileCheck %t/ae_l16_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16_xc.ll -o - | FileCheck %t/ae_l16_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16_xp.ll -o - | FileCheck %t/ae_l16_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16m_i.ll -o - | FileCheck %t/ae_l16m_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16m_iu.ll -o - | FileCheck %t/ae_l16m_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16m_x.ll -o - | FileCheck %t/ae_l16m_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16m_xc.ll -o - | FileCheck %t/ae_l16m_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16m_xu.ll -o - | FileCheck %t/ae_l16m_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x2m_i.ll -o - | FileCheck %t/ae_l16x2m_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x2m_iu.ll -o - | FileCheck %t/ae_l16x2m_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x2m_x.ll -o - | FileCheck %t/ae_l16x2m_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x2m_xc.ll -o - | FileCheck %t/ae_l16x2m_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x2m_xu.ll -o - | FileCheck %t/ae_l16x2m_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_i.ll -o - | FileCheck %t/ae_l16x4_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_ip.ll -o - | FileCheck %t/ae_l16x4_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_ric.ll -o - | FileCheck %t/ae_l16x4_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_rip.ll -o - | FileCheck %t/ae_l16x4_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_x.ll -o - | FileCheck %t/ae_l16x4_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_xc.ll -o - | FileCheck %t/ae_l16x4_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l16x4_xp.ll -o - | FileCheck %t/ae_l16x4_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32_i.ll -o - | FileCheck %t/ae_l32_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32_ip.ll -o - | FileCheck %t/ae_l32_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32_x.ll -o - | FileCheck %t/ae_l32_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32_xc.ll -o - | FileCheck %t/ae_l32_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32_xp.ll -o - | FileCheck %t/ae_l32_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32f24_i.ll -o - | FileCheck %t/ae_l32f24_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32f24_ip.ll -o - | FileCheck %t/ae_l32f24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32f24_x.ll -o - | FileCheck %t/ae_l32f24_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32f24_xc.ll -o - | FileCheck %t/ae_l32f24_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32f24_xp.ll -o - | FileCheck %t/ae_l32f24_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32m_i.ll -o - | FileCheck %t/ae_l32m_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32m_iu.ll -o - | FileCheck %t/ae_l32m_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32m_x.ll -o - | FileCheck %t/ae_l32m_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32m_xc.ll -o - | FileCheck %t/ae_l32m_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32m_xu.ll -o - | FileCheck %t/ae_l32m_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_i.ll -o - | FileCheck %t/ae_l32x2_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_ip.ll -o - | FileCheck %t/ae_l32x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_ric.ll -o - | FileCheck %t/ae_l32x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_rip.ll -o - | 
FileCheck %t/ae_l32x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_x.ll -o - | FileCheck %t/ae_l32x2_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_xc.ll -o - | FileCheck %t/ae_l32x2_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2_xp.ll -o - | FileCheck %t/ae_l32x2_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_i.ll -o - | FileCheck %t/ae_l32x2f24_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_ip.ll -o - | FileCheck %t/ae_l32x2f24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_ric.ll -o - | FileCheck %t/ae_l32x2f24_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_rip.ll -o - | FileCheck %t/ae_l32x2f24_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_x.ll -o - | FileCheck %t/ae_l32x2f24_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_xc.ll -o - | FileCheck %t/ae_l32x2f24_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l32x2f24_xp.ll -o - | FileCheck %t/ae_l32x2f24_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l64_i.ll -o - | FileCheck %t/ae_l64_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l64_ip.ll -o - | FileCheck %t/ae_l64_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l64_x.ll -o - | FileCheck %t/ae_l64_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l64_xc.ll -o - | FileCheck %t/ae_l64_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_l64_xp.ll -o - | FileCheck %t/ae_l64_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4_ic.ll -o - | FileCheck %t/ae_la16x4_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4_ip.ll -o - | FileCheck %t/ae_la16x4_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4_ric.ll -o - | FileCheck %t/ae_la16x4_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4_rip.ll -o - | FileCheck %t/ae_la16x4_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4neg_pc.ll -o - | FileCheck %t/ae_la16x4neg_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la16x4pos_pc.ll -o - | FileCheck %t/ae_la16x4pos_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24_ic.ll -o - | FileCheck %t/ae_la24_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24_ip.ll -o - | FileCheck %t/ae_la24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24_ric.ll -o - | FileCheck %t/ae_la24_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24_rip.ll -o - | FileCheck %t/ae_la24_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24neg_pc.ll -o - | FileCheck %t/ae_la24neg_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24pos_pc.ll -o - | FileCheck %t/ae_la24pos_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2_ic.ll -o - | FileCheck %t/ae_la24x2_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2_ip.ll -o - | FileCheck %t/ae_la24x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2_ric.ll -o - | FileCheck %t/ae_la24x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2_rip.ll -o - | FileCheck %t/ae_la24x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2neg_pc.ll -o - | FileCheck %t/ae_la24x2neg_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la24x2pos_pc.ll -o - | FileCheck %t/ae_la24x2pos_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2_ic.ll -o - | FileCheck %t/ae_la32x2_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2_ip.ll -o - | FileCheck %t/ae_la32x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2_ric.ll -o - | FileCheck %t/ae_la32x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2_rip.ll -o - | FileCheck %t/ae_la32x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2f24_ic.ll -o - | 
FileCheck %t/ae_la32x2f24_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2f24_ip.ll -o - | FileCheck %t/ae_la32x2f24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2f24_ric.ll -o - | FileCheck %t/ae_la32x2f24_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2f24_rip.ll -o - | FileCheck %t/ae_la32x2f24_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2neg_pc.ll -o - | FileCheck %t/ae_la32x2neg_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la32x2pos_pc.ll -o - | FileCheck %t/ae_la32x2pos_pc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_la64_pp.ll -o - | FileCheck %t/ae_la64_pp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lalign64_i.ll -o - | FileCheck %t/ae_lalign64_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lb.ll -o - | FileCheck %t/ae_lb.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lbi.ll -o - | FileCheck %t/ae_lbi.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lbk.ll -o - | FileCheck %t/ae_lbk.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lbki.ll -o - | FileCheck %t/ae_lbki.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lbs.ll -o - | FileCheck %t/ae_lbs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lbsi.ll -o - | FileCheck %t/ae_lbsi.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_le64.ll -o - | FileCheck %t/ae_le64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_lt64.ll -o - | FileCheck %t/ae_lt64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_max32.ll -o - | FileCheck %t/ae_max32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_max64.ll -o - | FileCheck %t/ae_max64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_maxabs32s.ll -o - | FileCheck %t/ae_maxabs32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_maxabs64s.ll -o - | FileCheck %t/ae_maxabs64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_min32.ll -o - | FileCheck %t/ae_min32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_min64.ll -o - | FileCheck %t/ae_min64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_minabs32s.ll -o - | FileCheck %t/ae_minabs32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_minabs64s.ll -o - | FileCheck %t/ae_minabs64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mov.ll -o - | FileCheck %t/ae_mov.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad16_0.ll -o - | FileCheck %t/ae_movad16_0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad16_1.ll -o - | FileCheck %t/ae_movad16_1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad16_2.ll -o - | FileCheck %t/ae_movad16_2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad16_3.ll -o - | FileCheck %t/ae_movad16_3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad32_h.ll -o - | FileCheck %t/ae_movad32_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movad32_l.ll -o - | FileCheck %t/ae_movad32_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movalign.ll -o - | FileCheck %t/ae_movalign.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movda16.ll -o - | FileCheck %t/ae_movda16.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movda16x2.ll -o - | FileCheck %t/ae_movda16x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movda32.ll -o - | FileCheck %t/ae_movda32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movda32x2.ll -o - | FileCheck %t/ae_movda32x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movf64.ll -o - | FileCheck %t/ae_movf64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movi.ll -o - | FileCheck %t/ae_movi.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_movt64.ll -o - | FileCheck %t/ae_movt64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul16x4.ll -o - | FileCheck %t/ae_mul16x4.ll +; RUN: llc -mtriple=xtensa 
-mcpu=cnl %t/ae_mul32_hh.ll -o - | FileCheck %t/ae_mul32_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32_lh.ll -o - | FileCheck %t/ae_mul32_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32_ll.ll -o - | FileCheck %t/ae_mul32_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32_ll_s2.ll -o - | FileCheck %t/ae_mul32_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32u_ll.ll -o - | FileCheck %t/ae_mul32u_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h0.ll -o - | FileCheck %t/ae_mul32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h0_s2.ll -o - | FileCheck %t/ae_mul32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h1.ll -o - | FileCheck %t/ae_mul32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h1_s2.ll -o - | FileCheck %t/ae_mul32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h2.ll -o - | FileCheck %t/ae_mul32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h2_s2.ll -o - | FileCheck %t/ae_mul32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h3.ll -o - | FileCheck %t/ae_mul32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_h3_s2.ll -o - | FileCheck %t/ae_mul32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l0.ll -o - | FileCheck %t/ae_mul32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l0_s2.ll -o - | FileCheck %t/ae_mul32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l1.ll -o - | FileCheck %t/ae_mul32x16_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l1_s2.ll -o - | FileCheck %t/ae_mul32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l2.ll -o - | FileCheck %t/ae_mul32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l2_s2.ll -o - | FileCheck %t/ae_mul32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l3.ll -o - | FileCheck %t/ae_mul32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mul32x16_l3_s2.ll -o - | FileCheck %t/ae_mul32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula16x4.ll -o - | FileCheck %t/ae_mula16x4.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32_hh.ll -o - | FileCheck %t/ae_mula32_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32_lh.ll -o - | FileCheck %t/ae_mula32_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32_ll.ll -o - | FileCheck %t/ae_mula32_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32_ll_s2.ll -o - | FileCheck %t/ae_mula32_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32u_ll.ll -o - | FileCheck %t/ae_mula32u_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h0.ll -o - | FileCheck %t/ae_mula32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h0_s2.ll -o - | FileCheck %t/ae_mula32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h1.ll -o - | FileCheck %t/ae_mula32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h1_s2.ll -o - | FileCheck %t/ae_mula32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h2.ll -o - | FileCheck %t/ae_mula32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h2_s2.ll -o - | FileCheck %t/ae_mula32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h3.ll -o - | FileCheck %t/ae_mula32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_h3_s2.ll -o - | FileCheck %t/ae_mula32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l0.ll -o - | FileCheck %t/ae_mula32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl 
%t/ae_mula32x16_l0_s2.ll -o - | FileCheck %t/ae_mula32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l1.ll -o - | FileCheck %t/ae_mula32x16_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l1_s2.ll -o - | FileCheck %t/ae_mula32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l2.ll -o - | FileCheck %t/ae_mula32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l2_s2.ll -o - | FileCheck %t/ae_mula32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l3.ll -o - | FileCheck %t/ae_mula32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mula32x16_l3_s2.ll -o - | FileCheck %t/ae_mula32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad24_hh_ll.ll -o - | FileCheck %t/ae_mulaad24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulaad24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad24_hl_lh.ll -o - | FileCheck %t/ae_mulaad24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulaad24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h0_l1.ll -o - | FileCheck %t/ae_mulaad32x16_h0_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h0_l1_s2.ll -o - | FileCheck %t/ae_mulaad32x16_h0_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h1_l0.ll -o - | FileCheck %t/ae_mulaad32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulaad32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h2_l3.ll -o - | FileCheck %t/ae_mulaad32x16_h2_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h2_l3_s2.ll -o - | FileCheck %t/ae_mulaad32x16_h2_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h3_l2.ll -o - | FileCheck %t/ae_mulaad32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaad32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulaad32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_11_00.ll -o - | FileCheck %t/ae_mulaafd16ss_11_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_11_00_s2.ll -o - | FileCheck %t/ae_mulaafd16ss_11_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_13_02.ll -o - | FileCheck %t/ae_mulaafd16ss_13_02.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_13_02_s2.ll -o - | FileCheck %t/ae_mulaafd16ss_13_02_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_33_22.ll -o - | FileCheck %t/ae_mulaafd16ss_33_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd16ss_33_22_s2.ll -o - | FileCheck %t/ae_mulaafd16ss_33_22_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd24_hh_ll.ll -o - | FileCheck %t/ae_mulaafd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulaafd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd24_hl_lh.ll -o - | FileCheck %t/ae_mulaafd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulaafd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h0_l1.ll -o - | FileCheck %t/ae_mulaafd32x16_h0_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h0_l1_s2.ll -o - | FileCheck %t/ae_mulaafd32x16_h0_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulaafd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h1_l0_s2.ll -o - | FileCheck 
%t/ae_mulaafd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h2_l3.ll -o - | FileCheck %t/ae_mulaafd32x16_h2_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h2_l3_s2.ll -o - | FileCheck %t/ae_mulaafd32x16_h2_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulaafd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaafd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulaafd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulac24.ll -o - | FileCheck %t/ae_mulac24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulac32x16_h.ll -o - | FileCheck %t/ae_mulac32x16_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulac32x16_l.ll -o - | FileCheck %t/ae_mulac32x16_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_00.ll -o - | FileCheck %t/ae_mulaf16ss_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_00_s2.ll -o - | FileCheck %t/ae_mulaf16ss_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_10.ll -o - | FileCheck %t/ae_mulaf16ss_10.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_11.ll -o - | FileCheck %t/ae_mulaf16ss_11.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_20.ll -o - | FileCheck %t/ae_mulaf16ss_20.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_21.ll -o - | FileCheck %t/ae_mulaf16ss_21.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_22.ll -o - | FileCheck %t/ae_mulaf16ss_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_30.ll -o - | FileCheck %t/ae_mulaf16ss_30.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_31.ll -o - | FileCheck %t/ae_mulaf16ss_31.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_32.ll -o - | FileCheck %t/ae_mulaf16ss_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16ss_33.ll -o - | FileCheck %t/ae_mulaf16ss_33.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf16x4ss.ll -o - | FileCheck %t/ae_mulaf16x4ss.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32r_hh.ll -o - | FileCheck %t/ae_mulaf32r_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32r_lh.ll -o - | FileCheck %t/ae_mulaf32r_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32r_ll.ll -o - | FileCheck %t/ae_mulaf32r_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32r_ll_s2.ll -o - | FileCheck %t/ae_mulaf32r_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32s_hh.ll -o - | FileCheck %t/ae_mulaf32s_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32s_lh.ll -o - | FileCheck %t/ae_mulaf32s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32s_ll.ll -o - | FileCheck %t/ae_mulaf32s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32s_ll_s2.ll -o - | FileCheck %t/ae_mulaf32s_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h0.ll -o - | FileCheck %t/ae_mulaf32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h0_s2.ll -o - | FileCheck %t/ae_mulaf32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h1.ll -o - | FileCheck %t/ae_mulaf32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h1_s2.ll -o - | FileCheck %t/ae_mulaf32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h2.ll -o - | FileCheck %t/ae_mulaf32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h2_s2.ll -o - | FileCheck %t/ae_mulaf32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h3.ll -o - | FileCheck %t/ae_mulaf32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_h3_s2.ll -o - | 
FileCheck %t/ae_mulaf32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l0.ll -o - | FileCheck %t/ae_mulaf32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l0_s2.ll -o - | FileCheck %t/ae_mulaf32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l1.ll -o - | FileCheck %t/ae_mulaf32x16_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l1_s2.ll -o - | FileCheck %t/ae_mulaf32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l2.ll -o - | FileCheck %t/ae_mulaf32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l2_s2.ll -o - | FileCheck %t/ae_mulaf32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l3.ll -o - | FileCheck %t/ae_mulaf32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf32x16_l3_s2.ll -o - | FileCheck %t/ae_mulaf32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf48q32sp16s_l.ll -o - | FileCheck %t/ae_mulaf48q32sp16s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf48q32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulaf48q32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf48q32sp16u_l.ll -o - | FileCheck %t/ae_mulaf48q32sp16u_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaf48q32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulaf48q32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafc24ra.ll -o - | FileCheck %t/ae_mulafc24ra.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafc32x16ras_h.ll -o - | FileCheck %t/ae_mulafc32x16ras_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafc32x16ras_l.ll -o - | FileCheck %t/ae_mulafc32x16ras_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd24x2_fir_h.ll -o - | FileCheck %t/ae_mulafd24x2_fir_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd24x2_fir_l.ll -o - | FileCheck %t/ae_mulafd24x2_fir_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd32x16x2_fir_hh.ll -o - | FileCheck %t/ae_mulafd32x16x2_fir_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd32x16x2_fir_hl.ll -o - | FileCheck %t/ae_mulafd32x16x2_fir_hl.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd32x16x2_fir_lh.ll -o - | FileCheck %t/ae_mulafd32x16x2_fir_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafd32x16x2_fir_ll.ll -o - | FileCheck %t/ae_mulafd32x16x2_fir_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp24x2r.ll -o - | FileCheck %t/ae_mulafp24x2r.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp24x2r_s2.ll -o - | FileCheck %t/ae_mulafp24x2r_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp24x2ra.ll -o - | FileCheck %t/ae_mulafp24x2ra.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp24x2ra_s2.ll -o - | FileCheck %t/ae_mulafp24x2ra_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2ras_h.ll -o - | FileCheck %t/ae_mulafp32x16x2ras_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2ras_h_s2.ll -o - | FileCheck %t/ae_mulafp32x16x2ras_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2ras_l.ll -o - | FileCheck %t/ae_mulafp32x16x2ras_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2ras_l_s2.ll -o - | FileCheck %t/ae_mulafp32x16x2ras_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2rs_h.ll -o - | FileCheck %t/ae_mulafp32x16x2rs_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2rs_h_s2.ll -o - | FileCheck %t/ae_mulafp32x16x2rs_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x16x2rs_l.ll -o - | FileCheck %t/ae_mulafp32x16x2rs_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl 
%t/ae_mulafp32x16x2rs_l_s2.ll -o - | FileCheck %t/ae_mulafp32x16x2rs_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x2ras.ll -o - | FileCheck %t/ae_mulafp32x2ras.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafp32x2rs.ll -o - | FileCheck %t/ae_mulafp32x2rs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mulafq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulafq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mulafq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulap24x2.ll -o - | FileCheck %t/ae_mulap24x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulap24x2_s2.ll -o - | FileCheck %t/ae_mulap24x2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulap32x16x2_h.ll -o - | FileCheck %t/ae_mulap32x16x2_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulap32x16x2_l.ll -o - | FileCheck %t/ae_mulap32x16x2_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulap32x2.ll -o - | FileCheck %t/ae_mulap32x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaq32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulaq32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulaq32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulaq32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mularfq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mularfq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mularfq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mularfq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_hh.ll -o - | FileCheck %t/ae_mulas32f48p16s_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_hh_s2.ll -o - | FileCheck %t/ae_mulas32f48p16s_hh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_lh.ll -o - | FileCheck %t/ae_mulas32f48p16s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_lh_s2.ll -o - | FileCheck %t/ae_mulas32f48p16s_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_ll.ll -o - | FileCheck %t/ae_mulas32f48p16s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulas32f48p16s_ll_s2.ll -o - | FileCheck %t/ae_mulas32f48p16s_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd24_hh_ll.ll -o - | FileCheck %t/ae_mulasd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulasd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd24_hl_lh.ll -o - | FileCheck %t/ae_mulasd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulasd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulasd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulasd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulasd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulasd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd24_hh_ll.ll -o - | FileCheck %t/ae_mulasfd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulasfd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd24_hl_lh.ll -o - | FileCheck %t/ae_mulasfd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulasfd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulasfd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa 
-mcpu=cnl %t/ae_mulasfd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulasfd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulasfd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulasfd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulasfd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulc24.ll -o - | FileCheck %t/ae_mulc24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulc32x16_h.ll -o - | FileCheck %t/ae_mulc32x16_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulc32x16_l.ll -o - | FileCheck %t/ae_mulc32x16_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_00.ll -o - | FileCheck %t/ae_mulf16ss_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_00_s2.ll -o - | FileCheck %t/ae_mulf16ss_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_10.ll -o - | FileCheck %t/ae_mulf16ss_10.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_11.ll -o - | FileCheck %t/ae_mulf16ss_11.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_20.ll -o - | FileCheck %t/ae_mulf16ss_20.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_21.ll -o - | FileCheck %t/ae_mulf16ss_21.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_22.ll -o - | FileCheck %t/ae_mulf16ss_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_30.ll -o - | FileCheck %t/ae_mulf16ss_30.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_31.ll -o - | FileCheck %t/ae_mulf16ss_31.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_32.ll -o - | FileCheck %t/ae_mulf16ss_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16ss_33.ll -o - | FileCheck %t/ae_mulf16ss_33.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf16x4ss.ll -o - | FileCheck %t/ae_mulf16x4ss.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32r_hh.ll -o - | FileCheck %t/ae_mulf32r_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32r_lh.ll -o - | FileCheck %t/ae_mulf32r_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32r_ll.ll -o - | FileCheck %t/ae_mulf32r_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32r_ll_s2.ll -o - | FileCheck %t/ae_mulf32r_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32s_hh.ll -o - | FileCheck %t/ae_mulf32s_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32s_lh.ll -o - | FileCheck %t/ae_mulf32s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32s_ll.ll -o - | FileCheck %t/ae_mulf32s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32s_ll_s2.ll -o - | FileCheck %t/ae_mulf32s_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h0.ll -o - | FileCheck %t/ae_mulf32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h0_s2.ll -o - | FileCheck %t/ae_mulf32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h1.ll -o - | FileCheck %t/ae_mulf32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h1_s2.ll -o - | FileCheck %t/ae_mulf32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h2.ll -o - | FileCheck %t/ae_mulf32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h2_s2.ll -o - | FileCheck %t/ae_mulf32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h3.ll -o - | FileCheck %t/ae_mulf32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_h3_s2.ll -o - | FileCheck %t/ae_mulf32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l0.ll -o - | FileCheck %t/ae_mulf32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l0_s2.ll -o - | FileCheck 
%t/ae_mulf32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l1.ll -o - | FileCheck %t/ae_mulf32x16_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l1_s2.ll -o - | FileCheck %t/ae_mulf32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l2.ll -o - | FileCheck %t/ae_mulf32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l2_s2.ll -o - | FileCheck %t/ae_mulf32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l3.ll -o - | FileCheck %t/ae_mulf32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf32x16_l3_s2.ll -o - | FileCheck %t/ae_mulf32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf48q32sp16s_l.ll -o - | FileCheck %t/ae_mulf48q32sp16s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf48q32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulf48q32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf48q32sp16u_l.ll -o - | FileCheck %t/ae_mulf48q32sp16u_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulf48q32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulf48q32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfc24ra.ll -o - | FileCheck %t/ae_mulfc24ra.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfc32x16ras_h.ll -o - | FileCheck %t/ae_mulfc32x16ras_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfc32x16ras_l.ll -o - | FileCheck %t/ae_mulfc32x16ras_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd24x2_fir_h.ll -o - | FileCheck %t/ae_mulfd24x2_fir_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd24x2_fir_l.ll -o - | FileCheck %t/ae_mulfd24x2_fir_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd32x16x2_fir_hh.ll -o - | FileCheck %t/ae_mulfd32x16x2_fir_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd32x16x2_fir_hl.ll -o - | FileCheck %t/ae_mulfd32x16x2_fir_hl.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd32x16x2_fir_lh.ll -o - | FileCheck %t/ae_mulfd32x16x2_fir_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfd32x16x2_fir_ll.ll -o - | FileCheck %t/ae_mulfd32x16x2_fir_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp16x4ras.ll -o - | FileCheck %t/ae_mulfp16x4ras.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp16x4s.ll -o - | FileCheck %t/ae_mulfp16x4s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp24x2r.ll -o - | FileCheck %t/ae_mulfp24x2r.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp24x2r_s2.ll -o - | FileCheck %t/ae_mulfp24x2r_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp24x2ra.ll -o - | FileCheck %t/ae_mulfp24x2ra.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp24x2ra_s2.ll -o - | FileCheck %t/ae_mulfp24x2ra_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2ras_h.ll -o - | FileCheck %t/ae_mulfp32x16x2ras_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2ras_h_s2.ll -o - | FileCheck %t/ae_mulfp32x16x2ras_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2ras_l.ll -o - | FileCheck %t/ae_mulfp32x16x2ras_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2ras_l_s2.ll -o - | FileCheck %t/ae_mulfp32x16x2ras_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2rs_h.ll -o - | FileCheck %t/ae_mulfp32x16x2rs_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2rs_h_s2.ll -o - | FileCheck %t/ae_mulfp32x16x2rs_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2rs_l.ll -o - | FileCheck %t/ae_mulfp32x16x2rs_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x16x2rs_l_s2.ll -o - | FileCheck %t/ae_mulfp32x16x2rs_l_s2.ll +; RUN: llc -mtriple=xtensa 
-mcpu=cnl %t/ae_mulfp32x2ras.ll -o - | FileCheck %t/ae_mulfp32x2ras.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfp32x2rs.ll -o - | FileCheck %t/ae_mulfp32x2rs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mulfq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulfq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mulfq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulp24x2.ll -o - | FileCheck %t/ae_mulp24x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulp24x2_s2.ll -o - | FileCheck %t/ae_mulp24x2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulp32x16x2_h.ll -o - | FileCheck %t/ae_mulp32x16x2_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulp32x16x2_l.ll -o - | FileCheck %t/ae_mulp32x16x2_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulp32x2.ll -o - | FileCheck %t/ae_mulp32x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulq32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulq32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulq32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulq32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulrfq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mulrfq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulrfq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mulrfq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls16x4.ll -o - | FileCheck %t/ae_muls16x4.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32_hh.ll -o - | FileCheck %t/ae_muls32_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32_lh.ll -o - | FileCheck %t/ae_muls32_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32_ll.ll -o - | FileCheck %t/ae_muls32_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_hh.ll -o - | FileCheck %t/ae_muls32f48p16s_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_hh_s2.ll -o - | FileCheck %t/ae_muls32f48p16s_hh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_lh.ll -o - | FileCheck %t/ae_muls32f48p16s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_lh_s2.ll -o - | FileCheck %t/ae_muls32f48p16s_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_ll.ll -o - | FileCheck %t/ae_muls32f48p16s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32f48p16s_ll_s2.ll -o - | FileCheck %t/ae_muls32f48p16s_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32u_ll.ll -o - | FileCheck %t/ae_muls32u_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h0.ll -o - | FileCheck %t/ae_muls32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h0_s2.ll -o - | FileCheck %t/ae_muls32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h1.ll -o - | FileCheck %t/ae_muls32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h1_s2.ll -o - | FileCheck %t/ae_muls32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h2.ll -o - | FileCheck %t/ae_muls32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h2_s2.ll -o - | FileCheck %t/ae_muls32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h3.ll -o - | FileCheck %t/ae_muls32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_h3_s2.ll -o - | FileCheck %t/ae_muls32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l0.ll -o - | FileCheck %t/ae_muls32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l0_s2.ll -o - | FileCheck %t/ae_muls32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l1.ll -o - | FileCheck %t/ae_muls32x16_l1.ll +; RUN: 
llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l1_s2.ll -o - | FileCheck %t/ae_muls32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l2.ll -o - | FileCheck %t/ae_muls32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l2_s2.ll -o - | FileCheck %t/ae_muls32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l3.ll -o - | FileCheck %t/ae_muls32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_muls32x16_l3_s2.ll -o - | FileCheck %t/ae_muls32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad24_hh_ll.ll -o - | FileCheck %t/ae_mulsad24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulsad24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad32x16_h1_l0.ll -o - | FileCheck %t/ae_mulsad32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulsad32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad32x16_h3_l2.ll -o - | FileCheck %t/ae_mulsad32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsad32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulsad32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd24_hh_ll.ll -o - | FileCheck %t/ae_mulsafd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulsafd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulsafd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulsafd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulsafd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsafd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulsafd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_00.ll -o - | FileCheck %t/ae_mulsf16ss_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_00_s2.ll -o - | FileCheck %t/ae_mulsf16ss_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_10.ll -o - | FileCheck %t/ae_mulsf16ss_10.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_11.ll -o - | FileCheck %t/ae_mulsf16ss_11.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_20.ll -o - | FileCheck %t/ae_mulsf16ss_20.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_21.ll -o - | FileCheck %t/ae_mulsf16ss_21.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_22.ll -o - | FileCheck %t/ae_mulsf16ss_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_30.ll -o - | FileCheck %t/ae_mulsf16ss_30.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_31.ll -o - | FileCheck %t/ae_mulsf16ss_31.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_32.ll -o - | FileCheck %t/ae_mulsf16ss_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16ss_33.ll -o - | FileCheck %t/ae_mulsf16ss_33.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf16x4ss.ll -o - | FileCheck %t/ae_mulsf16x4ss.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32r_hh.ll -o - | FileCheck %t/ae_mulsf32r_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32r_lh.ll -o - | FileCheck %t/ae_mulsf32r_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32r_ll.ll -o - | FileCheck %t/ae_mulsf32r_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32r_ll_s2.ll -o - | FileCheck %t/ae_mulsf32r_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32s_hh.ll -o - | FileCheck %t/ae_mulsf32s_hh.ll +; RUN: llc -mtriple=xtensa 
-mcpu=cnl %t/ae_mulsf32s_lh.ll -o - | FileCheck %t/ae_mulsf32s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32s_ll.ll -o - | FileCheck %t/ae_mulsf32s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h0.ll -o - | FileCheck %t/ae_mulsf32x16_h0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h0_s2.ll -o - | FileCheck %t/ae_mulsf32x16_h0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h1.ll -o - | FileCheck %t/ae_mulsf32x16_h1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h1_s2.ll -o - | FileCheck %t/ae_mulsf32x16_h1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h2.ll -o - | FileCheck %t/ae_mulsf32x16_h2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h2_s2.ll -o - | FileCheck %t/ae_mulsf32x16_h2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h3.ll -o - | FileCheck %t/ae_mulsf32x16_h3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_h3_s2.ll -o - | FileCheck %t/ae_mulsf32x16_h3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l0.ll -o - | FileCheck %t/ae_mulsf32x16_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l0_s2.ll -o - | FileCheck %t/ae_mulsf32x16_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l1.ll -o - | FileCheck %t/ae_mulsf32x16_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l1_s2.ll -o - | FileCheck %t/ae_mulsf32x16_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l2.ll -o - | FileCheck %t/ae_mulsf32x16_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l2_s2.ll -o - | FileCheck %t/ae_mulsf32x16_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l3.ll -o - | FileCheck %t/ae_mulsf32x16_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf32x16_l3_s2.ll -o - | FileCheck %t/ae_mulsf32x16_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf48q32sp16s_l.ll -o - | FileCheck %t/ae_mulsf48q32sp16s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf48q32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulsf48q32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf48q32sp16u_l.ll -o - | FileCheck %t/ae_mulsf48q32sp16u_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsf48q32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulsf48q32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp24x2r.ll -o - | FileCheck %t/ae_mulsfp24x2r.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp24x2r_s2.ll -o - | FileCheck %t/ae_mulsfp24x2r_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp24x2ra.ll -o - | FileCheck %t/ae_mulsfp24x2ra.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp24x2ra_s2.ll -o - | FileCheck %t/ae_mulsfp24x2ra_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2ras_h.ll -o - | FileCheck %t/ae_mulsfp32x16x2ras_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2ras_h_s2.ll -o - | FileCheck %t/ae_mulsfp32x16x2ras_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2ras_l.ll -o - | FileCheck %t/ae_mulsfp32x16x2ras_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2ras_l_s2.ll -o - | FileCheck %t/ae_mulsfp32x16x2ras_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2rs_h.ll -o - | FileCheck %t/ae_mulsfp32x16x2rs_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2rs_h_s2.ll -o - | FileCheck %t/ae_mulsfp32x16x2rs_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2rs_l.ll -o - | FileCheck %t/ae_mulsfp32x16x2rs_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x16x2rs_l_s2.ll -o - | 
FileCheck %t/ae_mulsfp32x16x2rs_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x2ras.ll -o - | FileCheck %t/ae_mulsfp32x2ras.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfp32x2rs.ll -o - | FileCheck %t/ae_mulsfp32x2rs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mulsfq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsfq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mulsfq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsp24x2.ll -o - | FileCheck %t/ae_mulsp24x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsp24x2_s2.ll -o - | FileCheck %t/ae_mulsp24x2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsp32x16x2_h.ll -o - | FileCheck %t/ae_mulsp32x16x2_h.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsp32x16x2_l.ll -o - | FileCheck %t/ae_mulsp32x16x2_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsp32x2.ll -o - | FileCheck %t/ae_mulsp32x2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsq32sp16s_l_s2.ll -o - | FileCheck %t/ae_mulsq32sp16s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsq32sp16u_l_s2.ll -o - | FileCheck %t/ae_mulsq32sp16u_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsrfq32sp24s_h_s2.ll -o - | FileCheck %t/ae_mulsrfq32sp24s_h_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulsrfq32sp24s_l_s2.ll -o - | FileCheck %t/ae_mulsrfq32sp24s_l_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_hh.ll -o - | FileCheck %t/ae_mulss32f48p16s_hh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_hh_s2.ll -o - | FileCheck %t/ae_mulss32f48p16s_hh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_lh.ll -o - | FileCheck %t/ae_mulss32f48p16s_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_lh_s2.ll -o - | FileCheck %t/ae_mulss32f48p16s_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_ll.ll -o - | FileCheck %t/ae_mulss32f48p16s_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulss32f48p16s_ll_s2.ll -o - | FileCheck %t/ae_mulss32f48p16s_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd24_hh_ll.ll -o - | FileCheck %t/ae_mulssd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulssd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd24_hl_lh.ll -o - | FileCheck %t/ae_mulssd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulssd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulssd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulssd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulssd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulssd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd16ss_11_00.ll -o - | FileCheck %t/ae_mulssfd16ss_11_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd16ss_11_00_s2.ll -o - | FileCheck %t/ae_mulssfd16ss_11_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd16ss_13_02.ll -o - | FileCheck %t/ae_mulssfd16ss_13_02.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd16ss_13_02_s2.ll -o - | FileCheck %t/ae_mulssfd16ss_13_02_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd16ss_33_22.ll -o - | FileCheck %t/ae_mulssfd16ss_33_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl 
%t/ae_mulssfd16ss_33_22_s2.ll -o - | FileCheck %t/ae_mulssfd16ss_33_22_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd24_hh_ll.ll -o - | FileCheck %t/ae_mulssfd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulssfd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd24_hl_lh.ll -o - | FileCheck %t/ae_mulssfd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulssfd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulssfd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulssfd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulssfd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulssfd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulssfd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad24_hh_ll.ll -o - | FileCheck %t/ae_mulzaad24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzaad24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad24_hl_lh.ll -o - | FileCheck %t/ae_mulzaad24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzaad24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h0_l1.ll -o - | FileCheck %t/ae_mulzaad32x16_h0_l1.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h0_l1_s2.ll -o - | FileCheck %t/ae_mulzaad32x16_h0_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzaad32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzaad32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h2_l3.ll -o - | FileCheck %t/ae_mulzaad32x16_h2_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h2_l3_s2.ll -o - | FileCheck %t/ae_mulzaad32x16_h2_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzaad32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaad32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzaad32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_11_00.ll -o - | FileCheck %t/ae_mulzaafd16ss_11_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_11_00_s2.ll -o - | FileCheck %t/ae_mulzaafd16ss_11_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_13_02.ll -o - | FileCheck %t/ae_mulzaafd16ss_13_02.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_13_02_s2.ll -o - | FileCheck %t/ae_mulzaafd16ss_13_02_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_33_22.ll -o - | FileCheck %t/ae_mulzaafd16ss_33_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd16ss_33_22_s2.ll -o - | FileCheck %t/ae_mulzaafd16ss_33_22_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd24_hh_ll.ll -o - | FileCheck %t/ae_mulzaafd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzaafd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd24_hl_lh.ll -o - | FileCheck %t/ae_mulzaafd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzaafd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h0_l1.ll -o - | FileCheck %t/ae_mulzaafd32x16_h0_l1.ll +; RUN: 
llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h0_l1_s2.ll -o - | FileCheck %t/ae_mulzaafd32x16_h0_l1_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzaafd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzaafd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h2_l3.ll -o - | FileCheck %t/ae_mulzaafd32x16_h2_l3.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h2_l3_s2.ll -o - | FileCheck %t/ae_mulzaafd32x16_h2_l3_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzaafd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzaafd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzaafd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd24_hh_ll.ll -o - | FileCheck %t/ae_mulzasd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzasd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd24_hl_lh.ll -o - | FileCheck %t/ae_mulzasd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzasd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzasd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzasd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzasd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzasd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd24_hh_ll.ll -o - | FileCheck %t/ae_mulzasfd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzasfd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd24_hl_lh.ll -o - | FileCheck %t/ae_mulzasfd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzasfd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzasfd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzasfd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzasfd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzasfd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzasfd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad24_hh_ll.ll -o - | FileCheck %t/ae_mulzsad24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzsad24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzsad32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzsad32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzsad32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsad32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzsad32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsafd24_hh_ll.ll -o - | FileCheck %t/ae_mulzsafd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsafd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzsafd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl 
%t/ae_mulzsafd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzsafd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsafd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzsafd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsafd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzsafd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzsafd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzsafd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd24_hh_ll.ll -o - | FileCheck %t/ae_mulzssd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzssd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd24_hl_lh.ll -o - | FileCheck %t/ae_mulzssd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzssd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzssd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzssd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzssd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzssd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_11_00.ll -o - | FileCheck %t/ae_mulzssfd16ss_11_00.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_11_00_s2.ll -o - | FileCheck %t/ae_mulzssfd16ss_11_00_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_13_02.ll -o - | FileCheck %t/ae_mulzssfd16ss_13_02.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_13_02_s2.ll -o - | FileCheck %t/ae_mulzssfd16ss_13_02_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_33_22.ll -o - | FileCheck %t/ae_mulzssfd16ss_33_22.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd16ss_33_22_s2.ll -o - | FileCheck %t/ae_mulzssfd16ss_33_22_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd24_hh_ll.ll -o - | FileCheck %t/ae_mulzssfd24_hh_ll.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd24_hh_ll_s2.ll -o - | FileCheck %t/ae_mulzssfd24_hh_ll_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd24_hl_lh.ll -o - | FileCheck %t/ae_mulzssfd24_hl_lh.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd24_hl_lh_s2.ll -o - | FileCheck %t/ae_mulzssfd24_hl_lh_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd32x16_h1_l0.ll -o - | FileCheck %t/ae_mulzssfd32x16_h1_l0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd32x16_h1_l0_s2.ll -o - | FileCheck %t/ae_mulzssfd32x16_h1_l0_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd32x16_h3_l2.ll -o - | FileCheck %t/ae_mulzssfd32x16_h3_l2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_mulzssfd32x16_h3_l2_s2.ll -o - | FileCheck %t/ae_mulzssfd32x16_h3_l2_s2.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_nand.ll -o - | FileCheck %t/ae_nand.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg16s.ll -o - | FileCheck %t/ae_neg16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg24s.ll -o - | FileCheck %t/ae_neg24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg32.ll -o - | FileCheck %t/ae_neg32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg32s.ll -o - | FileCheck %t/ae_neg32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg64.ll -o - | FileCheck %t/ae_neg64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_neg64s.ll -o - | FileCheck %t/ae_neg64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl 
%t/ae_nsa64.ll -o - | FileCheck %t/ae_nsa64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_nsaz16_0.ll -o - | FileCheck %t/ae_nsaz16_0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_nsaz32_l.ll -o - | FileCheck %t/ae_nsaz32_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_or.ll -o - | FileCheck %t/ae_or.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_pksr24.ll -o - | FileCheck %t/ae_pksr24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_pksr32.ll -o - | FileCheck %t/ae_pksr32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round16x4f32sasym.ll -o - | FileCheck %t/ae_round16x4f32sasym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round16x4f32ssym.ll -o - | FileCheck %t/ae_round16x4f32ssym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round24x2f48sasym.ll -o - | FileCheck %t/ae_round24x2f48sasym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round24x2f48ssym.ll -o - | FileCheck %t/ae_round24x2f48ssym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round32x2f48sasym.ll -o - | FileCheck %t/ae_round32x2f48sasym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round32x2f48ssym.ll -o - | FileCheck %t/ae_round32x2f48ssym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round32x2f64sasym.ll -o - | FileCheck %t/ae_round32x2f64sasym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_round32x2f64ssym.ll -o - | FileCheck %t/ae_round32x2f64ssym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsp16f24asym.ll -o - | FileCheck %t/ae_roundsp16f24asym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsp16f24sym.ll -o - | FileCheck %t/ae_roundsp16f24sym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsp16q48x2asym.ll -o - | FileCheck %t/ae_roundsp16q48x2asym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsp16q48x2sym.ll -o - | FileCheck %t/ae_roundsp16q48x2sym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsq32f48asym.ll -o - | FileCheck %t/ae_roundsq32f48asym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_roundsq32f48sym.ll -o - | FileCheck %t/ae_roundsq32f48sym.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16_0_i.ll -o - | FileCheck %t/ae_s16_0_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16_0_ip.ll -o - | FileCheck %t/ae_s16_0_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16_0_x.ll -o - | FileCheck %t/ae_s16_0_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16_0_xc.ll -o - | FileCheck %t/ae_s16_0_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16_0_xp.ll -o - | FileCheck %t/ae_s16_0_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16m_l_i.ll -o - | FileCheck %t/ae_s16m_l_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16m_l_iu.ll -o - | FileCheck %t/ae_s16m_l_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16m_l_x.ll -o - | FileCheck %t/ae_s16m_l_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16m_l_xc.ll -o - | FileCheck %t/ae_s16m_l_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16m_l_xu.ll -o - | FileCheck %t/ae_s16m_l_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x2m_i.ll -o - | FileCheck %t/ae_s16x2m_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x2m_iu.ll -o - | FileCheck %t/ae_s16x2m_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x2m_x.ll -o - | FileCheck %t/ae_s16x2m_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x2m_xc.ll -o - | FileCheck %t/ae_s16x2m_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x2m_xu.ll -o - | FileCheck %t/ae_s16x2m_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_i.ll -o - | FileCheck %t/ae_s16x4_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_ip.ll -o - | FileCheck %t/ae_s16x4_ip.ll +; RUN: llc 
-mtriple=xtensa -mcpu=cnl %t/ae_s16x4_ric.ll -o - | FileCheck %t/ae_s16x4_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_rip.ll -o - | FileCheck %t/ae_s16x4_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_x.ll -o - | FileCheck %t/ae_s16x4_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_xc.ll -o - | FileCheck %t/ae_s16x4_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s16x4_xp.ll -o - | FileCheck %t/ae_s16x4_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24ra64s_i.ll -o - | FileCheck %t/ae_s24ra64s_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24ra64s_ip.ll -o - | FileCheck %t/ae_s24ra64s_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24ra64s_x.ll -o - | FileCheck %t/ae_s24ra64s_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24ra64s_xc.ll -o - | FileCheck %t/ae_s24ra64s_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24ra64s_xp.ll -o - | FileCheck %t/ae_s24ra64s_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s24x2ra64s_ip.ll -o - | FileCheck %t/ae_s24x2ra64s_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32_l_i.ll -o - | FileCheck %t/ae_s32_l_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32_l_ip.ll -o - | FileCheck %t/ae_s32_l_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32_l_x.ll -o - | FileCheck %t/ae_s32_l_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32_l_xc.ll -o - | FileCheck %t/ae_s32_l_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32_l_xp.ll -o - | FileCheck %t/ae_s32_l_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32f24_l_i.ll -o - | FileCheck %t/ae_s32f24_l_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32f24_l_ip.ll -o - | FileCheck %t/ae_s32f24_l_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32f24_l_x.ll -o - | FileCheck %t/ae_s32f24_l_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32f24_l_xc.ll -o - | FileCheck %t/ae_s32f24_l_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32f24_l_xp.ll -o - | FileCheck %t/ae_s32f24_l_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32m_i.ll -o - | FileCheck %t/ae_s32m_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32m_iu.ll -o - | FileCheck %t/ae_s32m_iu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32m_x.ll -o - | FileCheck %t/ae_s32m_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32m_xc.ll -o - | FileCheck %t/ae_s32m_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32m_xu.ll -o - | FileCheck %t/ae_s32m_xu.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32ra64s_i.ll -o - | FileCheck %t/ae_s32ra64s_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32ra64s_ip.ll -o - | FileCheck %t/ae_s32ra64s_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32ra64s_x.ll -o - | FileCheck %t/ae_s32ra64s_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32ra64s_xc.ll -o - | FileCheck %t/ae_s32ra64s_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32ra64s_xp.ll -o - | FileCheck %t/ae_s32ra64s_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_i.ll -o - | FileCheck %t/ae_s32x2_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_ip.ll -o - | FileCheck %t/ae_s32x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_ric.ll -o - | FileCheck %t/ae_s32x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_rip.ll -o - | FileCheck %t/ae_s32x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_x.ll -o - | FileCheck %t/ae_s32x2_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_xc.ll -o - | FileCheck %t/ae_s32x2_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2_xp.ll -o - | FileCheck %t/ae_s32x2_xp.ll +; RUN: llc -mtriple=xtensa 
-mcpu=cnl %t/ae_s32x2f24_i.ll -o - | FileCheck %t/ae_s32x2f24_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_ip.ll -o - | FileCheck %t/ae_s32x2f24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_ric.ll -o - | FileCheck %t/ae_s32x2f24_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_rip.ll -o - | FileCheck %t/ae_s32x2f24_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_x.ll -o - | FileCheck %t/ae_s32x2f24_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_xc.ll -o - | FileCheck %t/ae_s32x2f24_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2f24_xp.ll -o - | FileCheck %t/ae_s32x2f24_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s32x2ra64s_ip.ll -o - | FileCheck %t/ae_s32x2ra64s_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s64_i.ll -o - | FileCheck %t/ae_s64_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s64_ip.ll -o - | FileCheck %t/ae_s64_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s64_x.ll -o - | FileCheck %t/ae_s64_x.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s64_xc.ll -o - | FileCheck %t/ae_s64_xc.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_s64_xp.ll -o - | FileCheck %t/ae_s64_xp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa16x4_ic.ll -o - | FileCheck %t/ae_sa16x4_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa16x4_ip.ll -o - | FileCheck %t/ae_sa16x4_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa16x4_ric.ll -o - | FileCheck %t/ae_sa16x4_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa16x4_rip.ll -o - | FileCheck %t/ae_sa16x4_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24_l_ic.ll -o - | FileCheck %t/ae_sa24_l_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24_l_ip.ll -o - | FileCheck %t/ae_sa24_l_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24_l_ric.ll -o - | FileCheck %t/ae_sa24_l_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24_l_rip.ll -o - | FileCheck %t/ae_sa24_l_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24x2_ic.ll -o - | FileCheck %t/ae_sa24x2_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24x2_ip.ll -o - | FileCheck %t/ae_sa24x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24x2_ric.ll -o - | FileCheck %t/ae_sa24x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa24x2_rip.ll -o - | FileCheck %t/ae_sa24x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2_ic.ll -o - | FileCheck %t/ae_sa32x2_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2_ip.ll -o - | FileCheck %t/ae_sa32x2_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2_ric.ll -o - | FileCheck %t/ae_sa32x2_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2_rip.ll -o - | FileCheck %t/ae_sa32x2_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2f24_ic.ll -o - | FileCheck %t/ae_sa32x2f24_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2f24_ip.ll -o - | FileCheck %t/ae_sa32x2f24_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2f24_ric.ll -o - | FileCheck %t/ae_sa32x2f24_ric.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa32x2f24_rip.ll -o - | FileCheck %t/ae_sa32x2f24_rip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa64neg_fp.ll -o - | FileCheck %t/ae_sa64neg_fp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sa64pos_fp.ll -o - | FileCheck %t/ae_sa64pos_fp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_salign64_i.ll -o - | FileCheck %t/ae_salign64_i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sat16x4.ll -o - | FileCheck %t/ae_sat16x4.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sat24s.ll -o - | FileCheck %t/ae_sat24s.ll +; 
RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sat48s.ll -o - | FileCheck %t/ae_sat48s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_satq56s.ll -o - | FileCheck %t/ae_satq56s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sb.ll -o - | FileCheck %t/ae_sb.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sb_ic.ll -o - | FileCheck %t/ae_sb_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sb_ip.ll -o - | FileCheck %t/ae_sb_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbf.ll -o - | FileCheck %t/ae_sbf.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbf_ic.ll -o - | FileCheck %t/ae_sbf_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbf_ip.ll -o - | FileCheck %t/ae_sbf_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbi.ll -o - | FileCheck %t/ae_sbi.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbi_ic.ll -o - | FileCheck %t/ae_sbi_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sbi_ip.ll -o - | FileCheck %t/ae_sbi_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sel16i.ll -o - | FileCheck %t/ae_sel16i.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sel16i_n.ll -o - | FileCheck %t/ae_sel16i_n.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sext32.ll -o - | FileCheck %t/ae_sext32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sext32x2d16_10.ll -o - | FileCheck %t/ae_sext32x2d16_10.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sext32x2d16_32.ll -o - | FileCheck %t/ae_sext32x2d16_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sha32.ll -o - | FileCheck %t/ae_sha32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_shortswap.ll -o - | FileCheck %t/ae_shortswap.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaa16s.ll -o - | FileCheck %t/ae_slaa16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaa32.ll -o - | FileCheck %t/ae_slaa32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaa32s.ll -o - | FileCheck %t/ae_slaa32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaa64.ll -o - | FileCheck %t/ae_slaa64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaa64s.ll -o - | FileCheck %t/ae_slaa64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaaq56.ll -o - | FileCheck %t/ae_slaaq56.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai16s.ll -o - | FileCheck %t/ae_slai16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai24.ll -o - | FileCheck %t/ae_slai24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai24s.ll -o - | FileCheck %t/ae_slai24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai32.ll -o - | FileCheck %t/ae_slai32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai32s.ll -o - | FileCheck %t/ae_slai32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai64.ll -o - | FileCheck %t/ae_slai64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slai64s.ll -o - | FileCheck %t/ae_slai64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slaisq56s.ll -o - | FileCheck %t/ae_slaisq56s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas24.ll -o - | FileCheck %t/ae_slas24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas24s.ll -o - | FileCheck %t/ae_slas24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas32.ll -o - | FileCheck %t/ae_slas32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas32s.ll -o - | FileCheck %t/ae_slas32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas64.ll -o - | FileCheck %t/ae_slas64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slas64s.ll -o - | FileCheck %t/ae_slas64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slasq56.ll -o - | FileCheck %t/ae_slasq56.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_slassq56s.ll -o - | FileCheck %t/ae_slassq56s.ll +; RUN: llc 
-mtriple=xtensa -mcpu=cnl %t/ae_sra64_32.ll -o - | FileCheck %t/ae_sra64_32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa16rs.ll -o - | FileCheck %t/ae_sraa16rs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa16s.ll -o - | FileCheck %t/ae_sraa16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa32.ll -o - | FileCheck %t/ae_sraa32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa32rs.ll -o - | FileCheck %t/ae_sraa32rs.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa32s.ll -o - | FileCheck %t/ae_sraa32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sraa64.ll -o - | FileCheck %t/ae_sraa64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai16.ll -o - | FileCheck %t/ae_srai16.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai16r.ll -o - | FileCheck %t/ae_srai16r.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai24.ll -o - | FileCheck %t/ae_srai24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai32.ll -o - | FileCheck %t/ae_srai32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai32r.ll -o - | FileCheck %t/ae_srai32r.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srai64.ll -o - | FileCheck %t/ae_srai64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sras24.ll -o - | FileCheck %t/ae_sras24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sras32.ll -o - | FileCheck %t/ae_sras32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sras64.ll -o - | FileCheck %t/ae_sras64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srla32.ll -o - | FileCheck %t/ae_srla32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srla64.ll -o - | FileCheck %t/ae_srla64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srli24.ll -o - | FileCheck %t/ae_srli24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srli32.ll -o - | FileCheck %t/ae_srli32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srli64.ll -o - | FileCheck %t/ae_srli64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srls24.ll -o - | FileCheck %t/ae_srls24.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srls32.ll -o - | FileCheck %t/ae_srls32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_srls64.ll -o - | FileCheck %t/ae_srls64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub16.ll -o - | FileCheck %t/ae_sub16.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub16s.ll -o - | FileCheck %t/ae_sub16s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub24s.ll -o - | FileCheck %t/ae_sub24s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub32.ll -o - | FileCheck %t/ae_sub32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub32s.ll -o - | FileCheck %t/ae_sub32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub64.ll -o - | FileCheck %t/ae_sub64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_sub64s.ll -o - | FileCheck %t/ae_sub64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_subadd32.ll -o - | FileCheck %t/ae_subadd32.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_subadd32s.ll -o - | FileCheck %t/ae_subadd32s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_trunca32f64s_l.ll -o - | FileCheck %t/ae_trunca32f64s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_trunca32x2f64s.ll -o - | FileCheck %t/ae_trunca32x2f64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_trunci32f64s_l.ll -o - | FileCheck %t/ae_trunci32f64s_l.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_trunci32x2f64s.ll -o - | FileCheck %t/ae_trunci32x2f64s.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldl16c.ll -o - | FileCheck %t/ae_vldl16c.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldl16c_ic.ll -o - | FileCheck %t/ae_vldl16c_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldl16c_ip.ll -o - | FileCheck 
%t/ae_vldl16c_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldl16t.ll -o - | FileCheck %t/ae_vldl16t.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldl32t.ll -o - | FileCheck %t/ae_vldl32t.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vldsht.ll -o - | FileCheck %t/ae_vldsht.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vlel16t.ll -o - | FileCheck %t/ae_vlel16t.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vlel32t.ll -o - | FileCheck %t/ae_vlel32t.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vles16c.ll -o - | FileCheck %t/ae_vles16c.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vles16c_ic.ll -o - | FileCheck %t/ae_vles16c_ic.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_vles16c_ip.ll -o - | FileCheck %t/ae_vles16c_ip.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_xor.ll -o - | FileCheck %t/ae_xor.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/ae_zalign64.ll -o - | FileCheck %t/ae_zalign64.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_bithead.ll -o - | FileCheck %t/rur_ae_bithead.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_bitptr.ll -o - | FileCheck %t/rur_ae_bitptr.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_bitsused.ll -o - | FileCheck %t/rur_ae_bitsused.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_cbegin0.ll -o - | FileCheck %t/rur_ae_cbegin0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_cend0.ll -o - | FileCheck %t/rur_ae_cend0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_cw_sd_no.ll -o - | FileCheck %t/rur_ae_cw_sd_no.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_cwrap.ll -o - | FileCheck %t/rur_ae_cwrap.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_first_ts.ll -o - | FileCheck %t/rur_ae_first_ts.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_nextoffset.ll -o - | FileCheck %t/rur_ae_nextoffset.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_overflow.ll -o - | FileCheck %t/rur_ae_overflow.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_ovf_sar.ll -o - | FileCheck %t/rur_ae_ovf_sar.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_sar.ll -o - | FileCheck %t/rur_ae_sar.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_searchdone.ll -o - | FileCheck %t/rur_ae_searchdone.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_tablesize.ll -o - | FileCheck %t/rur_ae_tablesize.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/rur_ae_ts_fts_bu_bp.ll -o - | FileCheck %t/rur_ae_ts_fts_bu_bp.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_bithead.ll -o - | FileCheck %t/wur_ae_bithead.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_bitptr.ll -o - | FileCheck %t/wur_ae_bitptr.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_bitsused.ll -o - | FileCheck %t/wur_ae_bitsused.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_cbegin0.ll -o - | FileCheck %t/wur_ae_cbegin0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_cend0.ll -o - | FileCheck %t/wur_ae_cend0.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_cw_sd_no.ll -o - | FileCheck %t/wur_ae_cw_sd_no.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_cwrap.ll -o - | FileCheck %t/wur_ae_cwrap.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_first_ts.ll -o - | FileCheck %t/wur_ae_first_ts.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_nextoffset.ll -o - | FileCheck %t/wur_ae_nextoffset.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_overflow.ll -o - | FileCheck %t/wur_ae_overflow.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_ovf_sar.ll -o - | FileCheck %t/wur_ae_ovf_sar.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_sar.ll -o - | FileCheck %t/wur_ae_sar.ll +; RUN: 
llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_searchdone.ll -o - | FileCheck %t/wur_ae_searchdone.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_tablesize.ll -o - | FileCheck %t/wur_ae_tablesize.ll +; RUN: llc -mtriple=xtensa -mcpu=cnl %t/wur_ae_ts_fts_bu_bp.ll -o - | FileCheck %t/wur_ae_ts_fts_bu_bp.ll +;--- ae_abs16s.ll +declare <4 x i16> @llvm.xtensa.ae.abs16s(<4 x i16>) +define <4 x i16> @test_xtensa_ae_abs16s(<4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs16s: + +%ret = call <4 x i16> @llvm.xtensa.ae.abs16s(<4 x i16> %ae_arth_v1) +; CHECK: ae_abs16s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_abs24s.ll +declare <2 x i32> @llvm.xtensa.ae.abs24s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_abs24s(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs24s: + +%ret = call <2 x i32> @llvm.xtensa.ae.abs24s(<2 x i32> %ae_arth_v1) +; CHECK: ae_abs24s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_abs32.ll +declare <2 x i32> @llvm.xtensa.ae.abs32(<2 x i32>) +define <2 x i32> @test_xtensa_ae_abs32(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs32: + +%ret = call <2 x i32> @llvm.xtensa.ae.abs32(<2 x i32> %ae_arth_v1) +; CHECK: ae_abs32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_abs32s.ll +declare <2 x i32> @llvm.xtensa.ae.abs32s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_abs32s(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs32s: + +%ret = call <2 x i32> @llvm.xtensa.ae.abs32s(<2 x i32> %ae_arth_v1) +; CHECK: ae_abs32s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_abs64.ll +declare <1 x i64> @llvm.xtensa.ae.abs64(<1 x i64>) +define <1 x i64> @test_xtensa_ae_abs64(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs64: + +%ret = call <1 x i64> @llvm.xtensa.ae.abs64(<1 x i64> %ae_arth_v1) +; CHECK: ae_abs64 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_abs64s.ll +declare <1 x i64> @llvm.xtensa.ae.abs64s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_abs64s(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_abs64s: + +%ret = call <1 x i64> @llvm.xtensa.ae.abs64s(<1 x i64> %ae_arth_v1) +; CHECK: ae_abs64s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_add16.ll +declare <4 x i16> @llvm.xtensa.ae.add16(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_add16(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add16: + + +%ret = call <4 x i16> @llvm.xtensa.ae.add16(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) +; CHECK: ae_add16 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_add16s.ll +declare <4 x i16> @llvm.xtensa.ae.add16s(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_add16s(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add16s: + + +%ret = call <4 x i16> @llvm.xtensa.ae.add16s(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) +; CHECK: ae_add16s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_add24s.ll +declare <2 x i32> @llvm.xtensa.ae.add24s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_add24s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add24s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.add24s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_add24s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_add32.ll +declare <2 x i32> @llvm.xtensa.ae.add32(<2 x i32>, <2 x i32>) +define <2 x i32> 
@test_xtensa_ae_add32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.add32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_add32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_add32_hl_lh.ll +declare <2 x i32> @llvm.xtensa.ae.add32.hl.lh(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_add32_hl_lh(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add32_hl_lh: + + +%ret = call <2 x i32> @llvm.xtensa.ae.add32.hl.lh(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_add32_hl_lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_add32s.ll +declare <2 x i32> @llvm.xtensa.ae.add32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_add32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.add32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_add32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_add64.ll +declare <1 x i64> @llvm.xtensa.ae.add64(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_add64(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.add64(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_add64 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_add64s.ll +declare <1 x i64> @llvm.xtensa.ae.add64s(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_add64s(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_add64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.add64s(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_add64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_addbrba32.ll +declare i32 @llvm.xtensa.ae.addbrba32(i32, i32) +define i32 @test_xtensa_ae_addbrba32(i32 %art, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_addbrba32: + + +%ret = call i32 @llvm.xtensa.ae.addbrba32(i32 %art, i32 %ars) +; CHECK: ae_addbrba32 a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_addsub32.ll +declare <2 x i32> @llvm.xtensa.ae.addsub32(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_addsub32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_addsub32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.addsub32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_addsub32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_addsub32s.ll +declare <2 x i32> @llvm.xtensa.ae.addsub32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_addsub32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_addsub32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.addsub32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_addsub32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_and.ll +declare <1 x i64> @llvm.xtensa.ae.and(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_and(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_and: + + +%ret = call <1 x i64> @llvm.xtensa.ae.and(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) +; CHECK: ae_and aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvt32x2f16_10.ll +declare <2 x i32> @llvm.xtensa.ae.cvt32x2f16.10(<4 x 
i16>) +define <2 x i32> @test_xtensa_ae_cvt32x2f16_10(<4 x i16> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_cvt32x2f16_10: + +%ret = call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.10(<4 x i16> %ae_to_dr_v0) +; CHECK: ae_cvt32x2f16.10 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_cvt32x2f16_32.ll +declare <2 x i32> @llvm.xtensa.ae.cvt32x2f16.32(<4 x i16>) +define <2 x i32> @test_xtensa_ae_cvt32x2f16_32(<4 x i16> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_cvt32x2f16_32: + +%ret = call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.32(<4 x i16> %ae_to_dr_v0) +; CHECK: ae_cvt32x2f16.32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_cvt48a32.ll +declare <1 x i64> @llvm.xtensa.ae.cvt48a32(i32) +define <1 x i64> @test_xtensa_ae_cvt48a32(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_cvt48a32: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvt48a32(i32 %ars) +; CHECK: ae_cvt48a32 aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvt64a32.ll +declare <1 x i64> @llvm.xtensa.ae.cvt64a32(i32) +define <1 x i64> @test_xtensa_ae_cvt64a32(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_cvt64a32: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvt64a32(i32 %ars) +; CHECK: ae_cvt64a32 aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvt64f32_h.ll +declare <1 x i64> @llvm.xtensa.ae.cvt64f32.h(<2 x i32>) +define <1 x i64> @test_xtensa_ae_cvt64f32_h(<2 x i32> %ae_dr_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_cvt64f32_h: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvt64f32.h(<2 x i32> %ae_dr_to_dr_v0) +; CHECK: ae_cvt64f32.h aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvta32f24s_h.ll +declare i32 @llvm.xtensa.ae.cvta32f24s.h(<2 x i32>) +define i32 @test_xtensa_ae_cvta32f24s_h(<2 x i32> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_cvta32f24s_h: + +%ret = call i32 @llvm.xtensa.ae.cvta32f24s.h(<2 x i32> %ae_dr_to_ar_v0) +; CHECK: ae_cvta32f24s.h a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_cvta32f24s_l.ll +declare i32 @llvm.xtensa.ae.cvta32f24s.l(<2 x i32>) +define i32 @test_xtensa_ae_cvta32f24s_l(<2 x i32> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_cvta32f24s_l: + +%ret = call i32 @llvm.xtensa.ae.cvta32f24s.l(<2 x i32> %ae_dr_to_ar_v0) +; CHECK: ae_cvta32f24s.l a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_cvtq56a32s.ll +declare <1 x i64> @llvm.xtensa.ae.cvtq56a32s(i32) +define <1 x i64> @test_xtensa_ae_cvtq56a32s(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_cvtq56a32s: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvtq56a32s(i32 %ars) +; CHECK: ae_cvtq56a32s aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvtq56p32s_h.ll +declare <1 x i64> @llvm.xtensa.ae.cvtq56p32s.h(<2 x i32>) +define <1 x i64> @test_xtensa_ae_cvtq56p32s_h(<2 x i32> %ae_dr_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_cvtq56p32s_h: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.h(<2 x i32> %ae_dr_to_dr_v0) +; CHECK: ae_cvtq56p32s.h aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_cvtq56p32s_l.ll +declare <1 x i64> @llvm.xtensa.ae.cvtq56p32s.l(<2 x i32>) +define <1 x i64> @test_xtensa_ae_cvtq56p32s_l(<2 x i32> %ae_dr_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_cvtq56p32s_l: + +%ret = call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.l(<2 x i32> %ae_dr_to_dr_v0) +; CHECK: ae_cvtq56p32s.l aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_db.ll +declare ptr @llvm.xtensa.ae.db(ptr, i32) +define ptr @test_xtensa_ae_db(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_db: + + +%ret = call ptr 
@llvm.xtensa.ae.db(ptr %ars, i32 %art) +; CHECK: ae_db a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_db_ic.ll +declare ptr @llvm.xtensa.ae.db.ic(ptr, i32) +define ptr @test_xtensa_ae_db_ic(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_db_ic: + + +%ret = call ptr @llvm.xtensa.ae.db.ic(ptr %ars, i32 %art) +; CHECK: ae_db.ic a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_db_ip.ll +declare ptr @llvm.xtensa.ae.db.ip(ptr, i32) +define ptr @test_xtensa_ae_db_ip(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_db_ip: + + +%ret = call ptr @llvm.xtensa.ae.db.ip(ptr %ars, i32 %art) +; CHECK: ae_db.ip a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_dbi.ll +declare ptr @llvm.xtensa.ae.dbi(ptr, i32) +define ptr @test_xtensa_ae_dbi(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_dbi: + + +%ret = call ptr @llvm.xtensa.ae.dbi(ptr %ars, i32 1) +; CHECK: ae_dbi a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_dbi_ic.ll +declare ptr @llvm.xtensa.ae.dbi.ic(ptr, i32) +define ptr @test_xtensa_ae_dbi_ic(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_dbi_ic: + + +%ret = call ptr @llvm.xtensa.ae.dbi.ic(ptr %ars, i32 1) +; CHECK: ae_dbi.ic a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_dbi_ip.ll +declare ptr @llvm.xtensa.ae.dbi.ip(ptr, i32) +define ptr @test_xtensa_ae_dbi_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_dbi_ip: + + +%ret = call ptr @llvm.xtensa.ae.dbi.ip(ptr %ars, i32 1) +; CHECK: ae_dbi.ip a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_div64d32_h.ll +declare <1 x i64> @llvm.xtensa.ae.div64d32.h(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_div64d32_h(<1 x i64> %ae_arth_v, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_div64d32_h: + + +%ret = call <1 x i64> @llvm.xtensa.ae.div64d32.h(<1 x i64> %ae_arth_v, <2 x i32> %ae_arth_v1) +; CHECK: ae_div64d32.h aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_div64d32_l.ll +declare <1 x i64> @llvm.xtensa.ae.div64d32.l(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_div64d32_l(<1 x i64> %ae_arth_v, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_div64d32_l: + + +%ret = call <1 x i64> @llvm.xtensa.ae.div64d32.l(<1 x i64> %ae_arth_v, <2 x i32> %ae_arth_v1) +; CHECK: ae_div64d32.l aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_eq64.ll +declare <1 x i1> @llvm.xtensa.ae.eq64(<1 x i64>, <1 x i64>) +define <1 x i1> @test_xtensa_ae_eq64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_eq64: + + +%ret = call <1 x i1> @llvm.xtensa.ae.eq64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_eq64 b{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i1> %ret +} + + +;--- ae_l16_i.ll +declare <4 x i16> @llvm.xtensa.ae.l16.i(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16_i: + + +%ret = call <4 x i16> @llvm.xtensa.ae.l16.i(ptr %ars, i32 -16) +; CHECK: ae_l16.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_l16_ip.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16.ip(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16_ip: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.ip(ptr %ars, i32 -16) +; CHECK: ae_l16.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16_x.ll +declare <4 x i16> @llvm.xtensa.ae.l16.x(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16_x(ptr %ars, i32 %art) { +; CHECK-LABEL: 
test_xtensa_ae_l16_x: + + +%ret = call <4 x i16> @llvm.xtensa.ae.l16.x(ptr %ars, i32 %art) +; CHECK: ae_l16.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_l16_xc.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xc(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16_xc: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xc(ptr %ars, i32 %art) +; CHECK: ae_l16.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16_xp.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xp(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16_xp: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xp(ptr %ars, i32 %art) +; CHECK: ae_l16.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16m_i.ll +declare <2 x i32> @llvm.xtensa.ae.l16m.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16m_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16m_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l16m.i(ptr %ars, i32 -16) +; CHECK: ae_l16m.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l16m_iu.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.iu(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16m_iu(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16m_iu: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.iu(ptr %ars, i32 -16) +; CHECK: ae_l16m.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l16m_x.ll +declare <2 x i32> @llvm.xtensa.ae.l16m.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16m_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16m_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l16m.x(ptr %ars, i32 %art) +; CHECK: ae_l16m.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l16m_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16m_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16m_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xc(ptr %ars, i32 %art) +; CHECK: ae_l16m.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l16m_xu.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xu(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16m_xu(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16m_xu: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xu(ptr %ars, i32 %art) +; CHECK: ae_l16m.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l16x2m_i.ll +declare <2 x i32> @llvm.xtensa.ae.l16x2m.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16x2m_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x2m_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l16x2m.i(ptr %ars, i32 -32) +; CHECK: ae_l16x2m.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l16x2m_iu.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.iu(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16x2m_iu(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x2m_iu: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.iu(ptr %ars, i32 -32) +; CHECK: ae_l16x2m.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- 
ae_l16x2m_x.ll +declare <2 x i32> @llvm.xtensa.ae.l16x2m.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16x2m_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16x2m_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l16x2m.x(ptr %ars, i32 %art) +; CHECK: ae_l16x2m.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l16x2m_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16x2m_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16x2m_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xc(ptr %ars, i32 %art) +; CHECK: ae_l16x2m.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l16x2m_xu.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xu(ptr, i32) +define <2 x i32> @test_xtensa_ae_l16x2m_xu(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16x2m_xu: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xu(ptr %ars, i32 %art) +; CHECK: ae_l16x2m.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l16x4_i.ll +declare <4 x i16> @llvm.xtensa.ae.l16x4.i(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16x4_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x4_i: + + +%ret = call <4 x i16> @llvm.xtensa.ae.l16x4.i(ptr %ars, i32 -64) +; CHECK: ae_l16x4.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_l16x4_ip.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16x4_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x4_ip: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr %ars, i32 0) +; CHECK: ae_l16x4.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16x4_ric.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ric(ptr) +define <4 x i16> @test_xtensa_ae_l16x4_ric(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x4_ric: + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ric(ptr %ars) +; CHECK: ae_l16x4.ric aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16x4_rip.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.rip(ptr) +define <4 x i16> @test_xtensa_ae_l16x4_rip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l16x4_rip: + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.rip(ptr %ars) +; CHECK: ae_l16x4.rip aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16x4_x.ll +declare <4 x i16> @llvm.xtensa.ae.l16x4.x(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16x4_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16x4_x: + + +%ret = call <4 x i16> @llvm.xtensa.ae.l16x4.x(ptr %ars, i32 %art) +; CHECK: ae_l16x4.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_l16x4_xc.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xc(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16x4_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l16x4_xc: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xc(ptr %ars, i32 %art) +; CHECK: ae_l16x4.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l16x4_xp.ll +declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xp(ptr, i32) +define <4 x i16> @test_xtensa_ae_l16x4_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: 
test_xtensa_ae_l16x4_xp: + + +%ret = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xp(ptr %ars, i32 %art) +; CHECK: ae_l16x4.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_l32_i.ll +declare <2 x i32> @llvm.xtensa.ae.l32.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32.i(ptr %ars, i32 -32) +; CHECK: ae_l32.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32_ip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32.ip(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32_ip: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.ip(ptr %ars, i32 -32) +; CHECK: ae_l32.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32_x.ll +declare <2 x i32> @llvm.xtensa.ae.l32.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32.x(ptr %ars, i32 %art) +; CHECK: ae_l32.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xc(ptr %ars, i32 %art) +; CHECK: ae_l32.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32_xp.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xp(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32_xp: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xp(ptr %ars, i32 %art) +; CHECK: ae_l32.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32f24_i.ll +declare <2 x i32> @llvm.xtensa.ae.l32f24.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32f24_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32f24_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32f24.i(ptr %ars, i32 -32) +; CHECK: ae_l32f24.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32f24_ip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.ip(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32f24_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32f24_ip: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.ip(ptr %ars, i32 -32) +; CHECK: ae_l32f24.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32f24_x.ll +declare <2 x i32> @llvm.xtensa.ae.l32f24.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32f24_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32f24_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32f24.x(ptr %ars, i32 %art) +; CHECK: ae_l32f24.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32f24_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32f24_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32f24_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xc(ptr %ars, i32 %art) +; CHECK: ae_l32f24.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- 
ae_l32f24_xp.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xp(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32f24_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32f24_xp: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xp(ptr %ars, i32 %art) +; CHECK: ae_l32f24.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32m_i.ll +declare <1 x i64> @llvm.xtensa.ae.l32m.i(ptr, i32) +define <1 x i64> @test_xtensa_ae_l32m_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32m_i: + + +%ret = call <1 x i64> @llvm.xtensa.ae.l32m.i(ptr %ars, i32 -32) +; CHECK: ae_l32m.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_l32m_iu.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.iu(ptr, i32) +define <1 x i64> @test_xtensa_ae_l32m_iu(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32m_iu: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.iu(ptr %ars, i32 -32) +; CHECK: ae_l32m.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_l32m_x.ll +declare <1 x i64> @llvm.xtensa.ae.l32m.x(ptr, i32) +define <1 x i64> @test_xtensa_ae_l32m_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32m_x: + + +%ret = call <1 x i64> @llvm.xtensa.ae.l32m.x(ptr %ars, i32 %art) +; CHECK: ae_l32m.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_l32m_xc.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xc(ptr, i32) +define <1 x i64> @test_xtensa_ae_l32m_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32m_xc: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xc(ptr %ars, i32 %art) +; CHECK: ae_l32m.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_l32m_xu.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xu(ptr, i32) +define <1 x i64> @test_xtensa_ae_l32m_xu(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32m_xu: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xu(ptr %ars, i32 %art) +; CHECK: ae_l32m.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_l32x2_i.ll +declare <2 x i32> @llvm.xtensa.ae.l32x2.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32x2.i(ptr %ars, i32 -64) +; CHECK: ae_l32x2.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32x2_ip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ip(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2_ip: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ip(ptr %ars, i32 0) +; CHECK: ae_l32x2.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2_ric.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ric(ptr) +define <2 x i32> @test_xtensa_ae_l32x2_ric(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2_ric: + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ric(ptr %ars) +; CHECK: ae_l32x2.ric aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2_rip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.rip(ptr) +define <2 x i32> @test_xtensa_ae_l32x2_rip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2_rip: + +%ret = call { <2 x i32>, ptr 
} @llvm.xtensa.ae.l32x2.rip(ptr %ars) +; CHECK: ae_l32x2.rip aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2_x.ll +declare <2 x i32> @llvm.xtensa.ae.l32x2.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32x2.x(ptr %ars, i32 %art) +; CHECK: ae_l32x2.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32x2_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xc(ptr %ars, i32 %art) +; CHECK: ae_l32x2.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2_xp.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xp(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2_xp: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xp(ptr %ars, i32 %art) +; CHECK: ae_l32x2.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2f24_i.ll +declare <2 x i32> @llvm.xtensa.ae.l32x2f24.i(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2f24_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_i: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32x2f24.i(ptr %ars, i32 -64) +; CHECK: ae_l32x2f24.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32x2f24_ip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ip(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2f24_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_ip: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ip(ptr %ars, i32 0) +; CHECK: ae_l32x2f24.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2f24_ric.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ric(ptr) +define <2 x i32> @test_xtensa_ae_l32x2f24_ric(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_ric: + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ric(ptr %ars) +; CHECK: ae_l32x2f24.ric aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2f24_rip.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.rip(ptr) +define <2 x i32> @test_xtensa_ae_l32x2f24_rip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_rip: + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.rip(ptr %ars) +; CHECK: ae_l32x2f24.rip aed{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2f24_x.ll +declare <2 x i32> @llvm.xtensa.ae.l32x2f24.x(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2f24_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_x: + + +%ret = call <2 x i32> @llvm.xtensa.ae.l32x2f24.x(ptr %ars, i32 %art) +; CHECK: ae_l32x2f24.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_l32x2f24_xc.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xc(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2f24_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_xc: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xc(ptr %ars, i32 %art) +; CHECK: ae_l32x2f24.xc aed{{[0-9]+}}, a{{[0-9]+}}, 
a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l32x2f24_xp.ll +declare { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xp(ptr, i32) +define <2 x i32> @test_xtensa_ae_l32x2f24_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l32x2f24_xp: + + +%ret = call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xp(ptr %ars, i32 %art) +; CHECK: ae_l32x2f24.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_l64_i.ll +declare <1 x i64> @llvm.xtensa.ae.l64.i(ptr, i32) +define <1 x i64> @test_xtensa_ae_l64_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l64_i: + + +%ret = call <1 x i64> @llvm.xtensa.ae.l64.i(ptr %ars, i32 -64) +; CHECK: ae_l64.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_l64_ip.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l64.ip(ptr, i32) +define <1 x i64> @test_xtensa_ae_l64_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_l64_ip: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.ip(ptr %ars, i32 -64) +; CHECK: ae_l64.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_l64_x.ll +declare <1 x i64> @llvm.xtensa.ae.l64.x(ptr, i32) +define <1 x i64> @test_xtensa_ae_l64_x(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l64_x: + + +%ret = call <1 x i64> @llvm.xtensa.ae.l64.x(ptr %ars, i32 %art) +; CHECK: ae_l64.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_l64_xc.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xc(ptr, i32) +define <1 x i64> @test_xtensa_ae_l64_xc(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l64_xc: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xc(ptr %ars, i32 %art) +; CHECK: ae_l64.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_l64_xp.ll +declare { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xp(ptr, i32) +define <1 x i64> @test_xtensa_ae_l64_xp(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_l64_xp: + + +%ret = call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xp(ptr %ars, i32 %art) +; CHECK: ae_l64.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i64>, ptr } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_la16x4_ic.ll +declare { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ic(<8 x i8>, ptr) +define <4 x i16> @test_xtensa_ae_la16x4_ic(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la16x4_ic: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ic(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la16x4.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, <8 x i8>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_la16x4_ip.ll +declare { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ip(<8 x i8>, ptr) +define <4 x i16> @test_xtensa_ae_la16x4_ip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la16x4_ip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la16x4.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, <8 x i8>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_la16x4_ric.ll +declare { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ric(<8 x i8>, ptr) +define <4 x i16> @test_xtensa_ae_la16x4_ric(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: 
test_xtensa_ae_la16x4_ric: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ric(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la16x4.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, <8 x i8>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_la16x4_rip.ll +declare { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.rip(<8 x i8>, ptr) +define <4 x i16> @test_xtensa_ae_la16x4_rip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la16x4_rip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.rip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la16x4.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <4 x i16>, <8 x i8>, ptr } %ret, 0 +ret <4 x i16> %ev +} + + +;--- ae_la16x4neg_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4neg.pc(ptr) +define <1 x i64> @test_xtensa_ae_la16x4neg_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la16x4neg_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4neg.pc(ptr %ars) +; CHECK: ae_la16x4neg.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la16x4pos_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4pos.pc(ptr) +define <1 x i64> @test_xtensa_ae_la16x4pos_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la16x4pos_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4pos.pc(ptr %ars) +; CHECK: ae_la16x4pos.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la24_ic.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ic(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24_ic(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24_ic: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ic(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24_ip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24_ip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24_ip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24_ric.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ric(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24_ric(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24_ric: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ric(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24_rip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.rip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24_rip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24_rip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } 
@llvm.xtensa.ae.la24.rip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24neg_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la24neg.pc(ptr) +define <1 x i64> @test_xtensa_ae_la24neg_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24neg_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la24neg.pc(ptr %ars) +; CHECK: ae_la24neg.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la24pos_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la24pos.pc(ptr) +define <1 x i64> @test_xtensa_ae_la24pos_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24pos_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la24pos.pc(ptr %ars) +; CHECK: ae_la24pos.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la24x2_ic.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ic(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24x2_ic(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2_ic: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ic(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24x2.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24x2_ip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24x2_ip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2_ip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24x2.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24x2_ric.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ric(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24x2_ric(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2_ric: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ric(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24x2.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24x2_rip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.rip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la24x2_rip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2_rip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.rip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la24x2.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la24x2neg_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2neg.pc(ptr) +define <1 x i64> @test_xtensa_ae_la24x2neg_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2neg_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2neg.pc(ptr %ars) +; CHECK: ae_la24x2neg.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- 
ae_la24x2pos_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2pos.pc(ptr) +define <1 x i64> @test_xtensa_ae_la24x2pos_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la24x2pos_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2pos.pc(ptr %ars) +; CHECK: ae_la24x2pos.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la32x2_ic.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ic(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2_ic(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2_ic: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ic(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2_ip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2_ip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2_ip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2_ric.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ric(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2_ric(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2_ric: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ric(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2_rip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.rip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2_rip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2_rip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.rip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2f24_ic.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ic(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2f24_ic(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2f24_ic: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ic(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2f24.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2f24_ip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2f24_ip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2f24_ip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2f24.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + 
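; ---------------------------------------------------------------------------
; Editorial sketch, not part of the patch above: the post-increment load
; intrinsics return a { value, ptr } pair, and the generated tests only
; extract the loaded value. The snippet below illustrates the intended usage
; by feeding the updated pointer from one ae_l16x4.ip load into the next;
; the 8-byte increment and the helper name @sum_two_groups are illustrative
; assumptions, not taken from the patch.
declare { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr, i32)
declare <4 x i16> @llvm.xtensa.ae.add16(<4 x i16>, <4 x i16>)

define <4 x i16> @sum_two_groups(ptr %p) {
  ; Load four i16 lanes; the second result is the pointer advanced by the
  ; immediate (assumed 8 bytes here, one <4 x i16> group).
  %r0 = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr %p, i32 8)
  %v0 = extractvalue { <4 x i16>, ptr } %r0, 0
  %p1 = extractvalue { <4 x i16>, ptr } %r0, 1
  ; The second load continues from the updated pointer.
  %r1 = call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr %p1, i32 8)
  %v1 = extractvalue { <4 x i16>, ptr } %r1, 0
  ; Lane-wise add of the two loaded vectors via the ae_add16 intrinsic.
  %sum = call <4 x i16> @llvm.xtensa.ae.add16(<4 x i16> %v0, <4 x i16> %v1)
  ret <4 x i16> %sum
}
; ---------------------------------------------------------------------------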
+;--- ae_la32x2f24_ric.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ric(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2f24_ric(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2f24_ric: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ric(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2f24.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2f24_rip.ll +declare { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.rip(<8 x i8>, ptr) +define <2 x i32> @test_xtensa_ae_la32x2f24_rip(<1 x i64> %ae_ls_uu, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2f24_rip: +%wrap0 = bitcast <1 x i64> %ae_ls_uu to <8 x i8> + +%ret = call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.rip(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_la32x2f24.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <8 x i8>, ptr } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_la32x2neg_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2neg.pc(ptr) +define <1 x i64> @test_xtensa_ae_la32x2neg_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2neg_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2neg.pc(ptr %ars) +; CHECK: ae_la32x2neg.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la32x2pos_pc.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2pos.pc(ptr) +define <1 x i64> @test_xtensa_ae_la32x2pos_pc(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la32x2pos_pc: + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2pos.pc(ptr %ars) +; CHECK: ae_la32x2pos.pc u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_la64_pp.ll +declare <8 x i8> @llvm.xtensa.ae.la64.pp(ptr) +define <8 x i8> @test_xtensa_ae_la64_pp(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_la64_pp: + +%ret = call <8 x i8> @llvm.xtensa.ae.la64.pp(ptr %ars) +; CHECK: ae_la64.pp u{{[0-9]+}}, a{{[0-9]+}} + +ret <8 x i8> %ret +} + + +;--- ae_lalign64_i.ll +declare <8 x i8> @llvm.xtensa.ae.lalign64.i(ptr, i32) +define <8 x i8> @test_xtensa_ae_lalign64_i(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_lalign64_i: + + +%ret = call <8 x i8> @llvm.xtensa.ae.lalign64.i(ptr %ars, i32 -64) +; CHECK: ae_lalign64.i u{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret <8 x i8> %ret +} + + +;--- ae_lb.ll +declare i32 @llvm.xtensa.ae.lb(i32) +define i32 @test_xtensa_ae_lb(i32 %art) { +; CHECK-LABEL: test_xtensa_ae_lb: + +%ret = call i32 @llvm.xtensa.ae.lb(i32 %art) +; CHECK: ae_lb a{{[0-9]+}}, a{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_lbi.ll +declare i32 @llvm.xtensa.ae.lbi(i32) +define i32 @test_xtensa_ae_lbi() { +; CHECK-LABEL: test_xtensa_ae_lbi: + +%ret = call i32 @llvm.xtensa.ae.lbi(i32 1) +; CHECK: ae_lbi a{{[0-9]+}}, {{-?[0-9]+}} + +ret i32 %ret +} + + +;--- ae_lbk.ll +declare i32 @llvm.xtensa.ae.lbk(i32, i32) +define i32 @test_xtensa_ae_lbk(i32 %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_lbk: + + +%ret = call i32 @llvm.xtensa.ae.lbk(i32 %ars, i32 %art) +; CHECK: ae_lbk a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_lbki.ll +declare i32 @llvm.xtensa.ae.lbki(i32, i32) +define i32 @test_xtensa_ae_lbki(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_lbki: + + +%ret = call i32 @llvm.xtensa.ae.lbki(i32 %ars, i32 1) +; CHECK: 
ae_lbki a{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret i32 %ret +} + + +;--- ae_lbs.ll +declare i32 @llvm.xtensa.ae.lbs(i32) +define i32 @test_xtensa_ae_lbs(i32 %art) { +; CHECK-LABEL: test_xtensa_ae_lbs: + +%ret = call i32 @llvm.xtensa.ae.lbs(i32 %art) +; CHECK: ae_lbs a{{[0-9]+}}, a{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_lbsi.ll +declare i32 @llvm.xtensa.ae.lbsi(i32) +define i32 @test_xtensa_ae_lbsi() { +; CHECK-LABEL: test_xtensa_ae_lbsi: + +%ret = call i32 @llvm.xtensa.ae.lbsi(i32 1) +; CHECK: ae_lbsi a{{[0-9]+}}, {{-?[0-9]+}} + +ret i32 %ret +} + + +;--- ae_le64.ll +declare <1 x i1> @llvm.xtensa.ae.le64(<1 x i64>, <1 x i64>) +define <1 x i1> @test_xtensa_ae_le64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_le64: + + +%ret = call <1 x i1> @llvm.xtensa.ae.le64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_le64 b{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i1> %ret +} + + +;--- ae_lt64.ll +declare <1 x i1> @llvm.xtensa.ae.lt64(<1 x i64>, <1 x i64>) +define <1 x i1> @test_xtensa_ae_lt64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_lt64: + + +%ret = call <1 x i1> @llvm.xtensa.ae.lt64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_lt64 b{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i1> %ret +} + + +;--- ae_max32.ll +declare <2 x i32> @llvm.xtensa.ae.max32(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_max32(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_max32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.max32(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) +; CHECK: ae_max32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_max64.ll +declare <1 x i64> @llvm.xtensa.ae.max64(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_max64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_max64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.max64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_max64 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_maxabs32s.ll +declare <2 x i32> @llvm.xtensa.ae.maxabs32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_maxabs32s(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_maxabs32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.maxabs32s(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) +; CHECK: ae_maxabs32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_maxabs64s.ll +declare <1 x i64> @llvm.xtensa.ae.maxabs64s(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_maxabs64s(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_maxabs64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.maxabs64s(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_maxabs64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_min32.ll +declare <2 x i32> @llvm.xtensa.ae.min32(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_min32(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_min32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.min32(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) +; CHECK: ae_min32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_min64.ll +declare <1 x i64> @llvm.xtensa.ae.min64(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_min64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_min64: + + 
+%ret = call <1 x i64> @llvm.xtensa.ae.min64(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_min64 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_minabs32s.ll +declare <2 x i32> @llvm.xtensa.ae.minabs32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_minabs32s(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_minabs32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.minabs32s(<2 x i32> %ae_cmpp_v0, <2 x i32> %ae_cmpp_v1) +; CHECK: ae_minabs32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_minabs64s.ll +declare <1 x i64> @llvm.xtensa.ae.minabs64s(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_minabs64s(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) { +; CHECK-LABEL: test_xtensa_ae_minabs64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.minabs64s(<1 x i64> %ae_cmpp_v0, <1 x i64> %ae_cmpp_v1) +; CHECK: ae_minabs64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mov.ll +declare <1 x i64> @llvm.xtensa.ae.mov(<1 x i64>) +define <1 x i64> @test_xtensa_ae_mov(<1 x i64> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_mov: + +%ret = call <1 x i64> @llvm.xtensa.ae.mov(<1 x i64> %ae_to_dr_v0) +; CHECK: ae_mov aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_movad16_0.ll +declare i32 @llvm.xtensa.ae.movad16.0(<4 x i16>) +define i32 @test_xtensa_ae_movad16_0(<4 x i16> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad16_0: + +%ret = call i32 @llvm.xtensa.ae.movad16.0(<4 x i16> %ae_dr_to_ar_v0) +; CHECK: ae_movad16.0 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movad16_1.ll +declare i32 @llvm.xtensa.ae.movad16.1(<4 x i16>) +define i32 @test_xtensa_ae_movad16_1(<4 x i16> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad16_1: + +%ret = call i32 @llvm.xtensa.ae.movad16.1(<4 x i16> %ae_dr_to_ar_v0) +; CHECK: ae_movad16.1 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movad16_2.ll +declare i32 @llvm.xtensa.ae.movad16.2(<4 x i16>) +define i32 @test_xtensa_ae_movad16_2(<4 x i16> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad16_2: + +%ret = call i32 @llvm.xtensa.ae.movad16.2(<4 x i16> %ae_dr_to_ar_v0) +; CHECK: ae_movad16.2 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movad16_3.ll +declare i32 @llvm.xtensa.ae.movad16.3(<4 x i16>) +define i32 @test_xtensa_ae_movad16_3(<4 x i16> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad16_3: + +%ret = call i32 @llvm.xtensa.ae.movad16.3(<4 x i16> %ae_dr_to_ar_v0) +; CHECK: ae_movad16.3 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movad32_h.ll +declare i32 @llvm.xtensa.ae.movad32.h(<2 x i32>) +define i32 @test_xtensa_ae_movad32_h(<2 x i32> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad32_h: + +%ret = call i32 @llvm.xtensa.ae.movad32.h(<2 x i32> %ae_dr_to_ar_v0) +; CHECK: ae_movad32.h a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movad32_l.ll +declare i32 @llvm.xtensa.ae.movad32.l(<2 x i32>) +define i32 @test_xtensa_ae_movad32_l(<2 x i32> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_movad32_l: + +%ret = call i32 @llvm.xtensa.ae.movad32.l(<2 x i32> %ae_dr_to_ar_v0) +; CHECK: ae_movad32.l a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_movalign.ll +declare <8 x i8> @llvm.xtensa.ae.movalign(<8 x i8>) +define <8 x i8> @test_xtensa_ae_movalign(<1 x i64> %ae_uu_v) { +; CHECK-LABEL: test_xtensa_ae_movalign: +%wrap0 = bitcast <1 x i64> %ae_uu_v to <8 x i8> +%ret = call <8 x i8> 
@llvm.xtensa.ae.movalign(<8 x i8> %wrap0) +; CHECK: ae_movalign u{{[0-9]+}}, u{{[0-9]+}} + +ret <8 x i8> %ret +} + + +;--- ae_movda16.ll +declare <4 x i16> @llvm.xtensa.ae.movda16(i32) +define <4 x i16> @test_xtensa_ae_movda16(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_movda16: + +%ret = call <4 x i16> @llvm.xtensa.ae.movda16(i32 %ars) +; CHECK: ae_movda16 aed{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_movda16x2.ll +declare <4 x i16> @llvm.xtensa.ae.movda16x2(i32, i32) +define <4 x i16> @test_xtensa_ae_movda16x2(i32 %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_movda16x2: + + +%ret = call <4 x i16> @llvm.xtensa.ae.movda16x2(i32 %ars, i32 %art) +; CHECK: ae_movda16x2 aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_movda32.ll +declare <1 x i32> @llvm.xtensa.ae.movda32(i32) +define <1 x i32> @test_xtensa_ae_movda32(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_movda32: + +%ret = call <1 x i32> @llvm.xtensa.ae.movda32(i32 %ars) +; CHECK: ae_movda32 aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i32> %ret +} + + +;--- ae_movda32x2.ll +declare <2 x i32> @llvm.xtensa.ae.movda32x2(i32, i32) +define <2 x i32> @test_xtensa_ae_movda32x2(i32 %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_movda32x2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.movda32x2(i32 %ars, i32 %art) +; CHECK: ae_movda32x2 aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_movf64.ll +declare <1 x i64> @llvm.xtensa.ae.movf64(<1 x i64>, <1 x i64>, <1 x i1>) +define <1 x i64> @test_xtensa_ae_movf64(<1 x i64> %ae_cmov_v, <1 x i64> %ae_cmov_v0, <1 x i1> %bt) { +; CHECK-LABEL: test_xtensa_ae_movf64: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.movf64(<1 x i64> %ae_cmov_v, <1 x i64> %ae_cmov_v0, <1 x i1> %bt) +; CHECK: ae_movf64 aed{{[0-9]+}}, aed{{[0-9]+}}, b{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_movi.ll +declare <2 x i32> @llvm.xtensa.ae.movi(i32) +define <2 x i32> @test_xtensa_ae_movi() { +; CHECK-LABEL: test_xtensa_ae_movi: + +%ret = call <2 x i32> @llvm.xtensa.ae.movi(i32 -16) +; CHECK: ae_movi aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_movt64.ll +declare <1 x i64> @llvm.xtensa.ae.movt64(<1 x i64>, <1 x i64>, <1 x i1>) +define <1 x i64> @test_xtensa_ae_movt64(<1 x i64> %ae_cmov_v, <1 x i64> %ae_cmov_v0, <1 x i1> %bt) { +; CHECK-LABEL: test_xtensa_ae_movt64: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.movt64(<1 x i64> %ae_cmov_v, <1 x i64> %ae_cmov_v0, <1 x i1> %bt) +; CHECK: ae_movt64 aed{{[0-9]+}}, aed{{[0-9]+}}, b{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul16x4.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mul16x4(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mul16x4(<4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_mul16x4: + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mul16x4(<4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: ae_mul16x4 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_mul32_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mul32.hh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mul32_hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32_hh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32.hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32_lh.ll +declare <1 
x i64> @llvm.xtensa.ae.mul32.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mul32_lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32.lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mul32.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mul32_ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32.ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mul32_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mul32.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32u_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mul32u.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mul32u_ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32u_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32u.ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32u.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h1(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h1.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h1.s2(<2 x 
i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.h3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_h3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_h3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.h3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l1(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l1.s2(<2 x i32>, <4 x 
i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l1.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mul32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mul32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mul32x16.l3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mul32x16_l3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mul32x16_l3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mul32x16.l3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mul32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula16x4.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mula16x4(<2 x i32>, <2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mula16x4(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_mula16x4: + + + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mula16x4(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: ae_mula16x4 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_mula32_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mula32.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mula32_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mula32.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> 
@test_xtensa_ae_mula32_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mula32.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mula32_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mula32_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mula32.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32u_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mula32u.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mula32u_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32u_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32u.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32u.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h1(<1 
x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.h3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_h3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_h3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.h3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l0_s2.ll 
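+; Usage sketch (illustrative only, not checked by FileCheck; the function name
+; below is hypothetical and not part of the generated tests): the accumulating
+; "mula" intrinsics appear to take the 64-bit accumulator as their first
+; operand and return the updated accumulator, so back-to-back multiply-
+; accumulates can be chained by feeding each call's result into the next.
+define <1 x i64> @hypothetical_mula32x16_l0_s2_chain(<1 x i64> %acc, <2 x i32> %d0, <4 x i16> %d1) {
+  ; first multiply-accumulate into %acc (declare for the intrinsic follows below)
+  %acc1 = call <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64> %acc, <2 x i32> %d0, <4 x i16> %d1)
+  ; second multiply-accumulate, reusing the updated accumulator value
+  %acc2 = call <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64> %acc1, <2 x i32> %d0, <4 x i16> %d1)
+  ret <1 x i64> %acc2
+}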
+declare <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mula32x16_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mula32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mula32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mula32x16.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mula32x16_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: 
test_xtensa_ae_mula32x16_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mula32x16.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mula32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaad24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulaad24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaad24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaad24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaad24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulaad24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaad24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaad24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h0_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h0_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h0_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaad32x16.h0.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h0_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h0_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h0_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaad32x16.h0.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h1_l0.ll +declare <1 x 
i64> @llvm.xtensa.ae.mulaad32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaad32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaad32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h2_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h2_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h2_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaad32x16.h2.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h2_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h2_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h2_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaad32x16.h2.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaad32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaad32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaad32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaad32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaad32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd16ss_11_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00(<2 x i32>, <4 x i16>, 
<4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_11_00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_11_00: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd16ss.11_00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd16ss_11_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_11_00_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_11_00_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd16ss.11_00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd16ss_13_02.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_13_02(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_13_02: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd16ss.13_02 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd16ss_13_02_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_13_02_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_13_02_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd16ss.13_02_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd16ss_33_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_33_22(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_33_22: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd16ss.33_22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd16ss_33_22_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaafd16ss_33_22_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd16ss_33_22_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd16ss.33_22_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaafd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaafd24_hh_ll(<1 x 
i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulaafd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaafd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaafd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaafd24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulaafd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaafd24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaafd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h0_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h0_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h0_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd32x16.h0.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h0_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h0_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h0_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd32x16.h0.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> 
%opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h2_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h2_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h2_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd32x16.h2.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h2_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h2_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h2_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd32x16.h2.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaafd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaafd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaafd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaafd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaafd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulac24.ll +declare <2 x i32> @llvm.xtensa.ae.mulac24(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulac24(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulac24: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulac24(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulac24 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- 
ae_mulac32x16_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulac32x16.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulac32x16_h(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulac32x16_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulac32x16.h(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulac32x16.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulac32x16_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulac32x16.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulac32x16_l(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulac32x16_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulac32x16.l(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulac32x16.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.00(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_00: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf16ss.00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.00.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_00_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_00_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf16ss.00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_10.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.10(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_10(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_10: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.10(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.10 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_11.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.11(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_11(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_11: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.11(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.11 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_20.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.20(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_20(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_20: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.20(<2 x i32> %ae_mul_q0, <4 x i16> 
%ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.20 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_21.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.21(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_21(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_21: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.21(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.21 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.22(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_22(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_22: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.22(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_30.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.30(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_30(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_30: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.30(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.30 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_31.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.31(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_31(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_31: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.31(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.31 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_32.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.32(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_32(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_32: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.32(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16ss_33.ll +declare <2 x i32> @llvm.xtensa.ae.mulaf16ss.33(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16ss_33(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf16ss_33: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulaf16ss.33(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulaf16ss.33 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaf16x4ss.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulaf16x4ss(<2 x i32>, <2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulaf16x4ss(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_mulaf16x4ss: + + + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulaf16x4ss(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: 
ae_mulaf16x4ss aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_mulaf32r_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32r.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32r_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32r_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32r.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32r.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32r_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32r.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32r_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32r_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32r.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32r.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32r_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32r.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32r_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32r_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32r.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32r_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32r.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32r_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32r_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaf32r.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32s.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32s_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32s_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32s.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32s.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32s_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32s_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32s.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- 
ae_mulaf32s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32s.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32s_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32s_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32s.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32s_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32s.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf32s_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32s_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32s.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaf32s.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x 
i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_h3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_h3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> 
%opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulaf32x16_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf32x16_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulaf32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf48q32sp16s_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf48q32sp16s_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf48q32sp16s_l: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf48q32sp16s.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- 
ae_mulaf48q32sp16s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf48q32sp16s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf48q32sp16s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaf48q32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf48q32sp16u_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf48q32sp16u_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf48q32sp16u_l: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulaf48q32sp16u.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaf48q32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaf48q32sp16u_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaf48q32sp16u_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaf48q32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulafc24ra.ll +declare <2 x i32> @llvm.xtensa.ae.mulafc24ra(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafc24ra(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <2 x i32> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafc24ra: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafc24ra(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <2 x i32> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulafc24ra aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafc32x16ras_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafc32x16ras_h(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafc32x16ras_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.h(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulafc32x16ras.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafc32x16ras_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafc32x16ras_l(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafc32x16ras_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.l(<2 x i32> %opnd_ae_sem_mul_x4_q0, <2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulafc32x16ras.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafd24x2_fir_h.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.h(<1 x i64>, <1 x i64>, <2 x 
i32>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulafd24x2_fir_h(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd24x2_fir_h: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.h(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) +; CHECK: ae_mulafd24x2.fir.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafd24x2_fir_l.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.l(<1 x i64>, <1 x i64>, <2 x i32>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulafd24x2_fir_l(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd24x2_fir_l: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.l(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) +; CHECK: ae_mulafd24x2.fir.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafd32x16x2_fir_hh.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hh(<1 x i64>, <1 x i64>, <2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulafd32x16x2_fir_hh(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd32x16x2_fir_hh: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hh(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulafd32x16x2.fir.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafd32x16x2_fir_hl.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hl(<1 x i64>, <1 x i64>, <2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulafd32x16x2_fir_hl(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd32x16x2_fir_hl: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hl(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulafd32x16x2.fir.hl aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafd32x16x2_fir_lh.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.lh(<1 x i64>, <1 x i64>, <2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulafd32x16x2_fir_lh(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd32x16x2_fir_lh: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.lh(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulafd32x16x2.fir.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} 
+%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafd32x16x2_fir_ll.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.ll(<1 x i64>, <1 x i64>, <2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulafd32x16x2_fir_ll(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulafd32x16x2_fir_ll: + + + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.ll(<1 x i64> %ae_mul_q0, <1 x i64> %ae_mul_q1, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulafd32x16x2.fir.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulafp24x2r.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp24x2r(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp24x2r: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp24x2r aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp24x2r_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp24x2r.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp24x2r_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp24x2r_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp24x2r.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulafp24x2r_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp24x2ra.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp24x2ra(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp24x2ra: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp24x2ra aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp24x2ra_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp24x2ra.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp24x2ra_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp24x2ra_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulafp24x2ra_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2ras_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2ras_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2ras_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; 
CHECK: ae_mulafp32x16x2ras.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2ras_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2ras_h_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2ras_h_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulafp32x16x2ras.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2ras_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2ras_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2ras_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp32x16x2ras.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2ras_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2ras_l_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2ras_l_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulafp32x16x2ras.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2rs_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2rs_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2rs_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp32x16x2rs.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2rs_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2rs_h_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2rs_h_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulafp32x16x2rs.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2rs_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2rs_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2rs_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp32x16x2rs.l aed{{[0-9]+}}, 
aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x16x2rs_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulafp32x16x2rs_l_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x16x2rs_l_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulafp32x16x2rs.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x2ras.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x2ras(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x2ras: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp32x2ras aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafp32x2rs.ll +declare <2 x i32> @llvm.xtensa.ae.mulafp32x2rs(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulafp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafp32x2rs: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulafp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulafp32x2rs aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulafq32sp24s_h_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.h.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulafq32sp24s_h_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafq32sp24s_h_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.h.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulafq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulafq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulafq32sp24s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulafq32sp24s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulafq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulap24x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulap24x2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulap24x2(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulap24x2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulap24x2(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulap24x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulap24x2_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulap24x2.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulap24x2_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> 
%ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulap24x2_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulap24x2.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulap24x2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulap32x16x2_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulap32x16x2.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulap32x16x2_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulap32x16x2_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulap32x16x2.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulap32x16x2.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulap32x16x2_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulap32x16x2.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulap32x16x2_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulap32x16x2_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulap32x16x2.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulap32x16x2.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulap32x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulap32x2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulap32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulap32x2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulap32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulap32x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulaq32sp16s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaq32sp16s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaq32sp16s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaq32sp16s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaq32sp16s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaq32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulaq32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulaq32sp16u.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulaq32sp16u_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulaq32sp16u_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulaq32sp16u.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulaq32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mularfq32sp24s_h_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.h.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mularfq32sp24s_h_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mularfq32sp24s_h_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.h.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) 
+; CHECK: ae_mularfq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mularfq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mularfq32sp24s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mularfq32sp24s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mularfq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulas32f48p16s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_hh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_hh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_hh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulas32f48p16s.hh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulas32f48p16s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulas32f48p16s.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulas32f48p16s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, 
aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulas32f48p16s_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulas32f48p16s_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulas32f48p16s_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulas32f48p16s.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasd24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulasd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulasd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasd24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulasd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasd24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulasd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulasd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: 
test_xtensa_ae_mulasd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulasd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulasd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulasd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasfd24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulasfd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasfd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulasfd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasfd24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulasfd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulasfd24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulasfd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- 
ae_mulasfd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasfd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulasfd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasfd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulasfd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasfd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulasfd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulasfd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulasfd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulasfd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulasfd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulc24.ll +declare <2 x i32> @llvm.xtensa.ae.mulc24(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulc24(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulc24: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulc24(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulc24 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulc32x16_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulc32x16.h(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulc32x16_h(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulc32x16_h: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulc32x16.h(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulc32x16.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulc32x16_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulc32x16.l(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulc32x16_l(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulc32x16_l: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulc32x16.l(<2 x i32> 
%opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulc32x16.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.00(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_00: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf16ss.00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.00.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_00_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_00_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.00.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf16ss.00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_10.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.10(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_10(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_10: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.10(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.10 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_11.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.11(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_11(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_11: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.11(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.11 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_20.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.20(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_20(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_20: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.20(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.20 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_21.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.21(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_21(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_21: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.21(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.21 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.22(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_22(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_22: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.22(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_30.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.30(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_30(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_30: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.30(<4 x i16> %ae_mul_d0, <4 x i16> 
%ae_mul_d1) +; CHECK: ae_mulf16ss.30 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_31.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.31(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_31(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_31: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.31(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.31 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_32.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.32(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_32(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.32(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16ss_33.ll +declare <2 x i32> @llvm.xtensa.ae.mulf16ss.33(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16ss_33(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf16ss_33: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulf16ss.33(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulf16ss.33 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulf16x4ss.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulf16x4ss(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulf16x4ss(<4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_mulf16x4ss: + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulf16x4ss(<4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: ae_mulf16x4ss aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_mulf32r_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32r.hh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32r_hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32r_hh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32r.hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32r.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32r_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32r.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32r_lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32r_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32r.lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32r.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32r_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32r.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32r_ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32r_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32r.ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32r.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32r_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32r.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32r_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; 
CHECK-LABEL: test_xtensa_ae_mulf32r_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32r.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulf32r.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32s.hh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32s_hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32s_hh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32s.hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32s.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32s_lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32s_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32s.lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32s.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32s_ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32s_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32s.ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32s_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32s.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf32s_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32s_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32s.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulf32s.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h1(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, 
aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h1.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.h3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_h3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_h3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l1(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x 
i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l1.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf32x16.l3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulf32x16_l3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf32x16_l3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulf32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf48q32sp16s_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf48q32sp16s_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf48q32sp16s_l: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf48q32sp16s.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf48q32sp16s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf48q32sp16s_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf48q32sp16s_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x 
i32> %ae_mul_S2_d1) +; CHECK: ae_mulf48q32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf48q32sp16u_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf48q32sp16u_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf48q32sp16u_l: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulf48q32sp16u.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulf48q32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulf48q32sp16u_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulf48q32sp16u_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulf48q32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulfc24ra.ll +declare <2 x i32> @llvm.xtensa.ae.mulfc24ra(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfc24ra(<2 x i32> %opnd_ae_sem_mul_x4_d0, <2 x i32> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfc24ra: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfc24ra(<2 x i32> %opnd_ae_sem_mul_x4_d0, <2 x i32> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulfc24ra aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfc32x16ras_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.h(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfc32x16ras_h(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfc32x16ras_h: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.h(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulfc32x16ras.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfc32x16ras_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.l(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfc32x16ras_l(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfc32x16ras_l: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.l(<2 x i32> %opnd_ae_sem_mul_x4_d0, <4 x i16> %opnd_ae_sem_mul_x4_d1) +; CHECK: ae_mulfc32x16ras.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfd24x2_fir_h.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.h(<2 x i32>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulfd24x2_fir_h(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd24x2_fir_h: + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.h(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) +; CHECK: ae_mulfd24x2.fir.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfd24x2_fir_l.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.l(<2 x i32>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulfd24x2_fir_l(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd24x2_fir_l: + + + +%ret = call { <1 x i64>, <1 x i64> } 
@llvm.xtensa.ae.mulfd24x2.fir.l(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <2 x i32> %ae_mul_d2) +; CHECK: ae_mulfd24x2.fir.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfd32x16x2_fir_hh.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hh(<2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulfd32x16x2_fir_hh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd32x16x2_fir_hh: + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulfd32x16x2.fir.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfd32x16x2_fir_hl.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hl(<2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulfd32x16x2_fir_hl(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd32x16x2_fir_hl: + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hl(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulfd32x16x2.fir.hl aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfd32x16x2_fir_lh.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.lh(<2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulfd32x16x2_fir_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd32x16x2_fir_lh: + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulfd32x16x2.fir.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfd32x16x2_fir_ll.ll +declare { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.ll(<2 x i32>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulfd32x16x2_fir_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) { +; CHECK-LABEL: test_xtensa_ae_mulfd32x16x2_fir_ll: + + + +%ret = call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1, <4 x i16> %ae_mul_d2) +; CHECK: ae_mulfd32x16x2.fir.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <1 x i64>, <1 x i64> } %ret, 0 +ret <1 x i64> %ev +} + + +;--- ae_mulfp16x4ras.ll +declare <4 x i16> @llvm.xtensa.ae.mulfp16x4ras(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_mulfp16x4ras(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp16x4ras: + + +%ret = call <4 x i16> @llvm.xtensa.ae.mulfp16x4ras(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulfp16x4ras aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_mulfp16x4s.ll +declare <4 x i16> @llvm.xtensa.ae.mulfp16x4s(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_mulfp16x4s(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp16x4s: + + +%ret = call <4 x i16> 
@llvm.xtensa.ae.mulfp16x4s(<4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulfp16x4s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_mulfp24x2r.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp24x2r(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp24x2r: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp24x2r aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp24x2r_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp24x2r.s2(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp24x2r_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp24x2r_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp24x2r.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulfp24x2r_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp24x2ra.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp24x2ra(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp24x2ra: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp24x2ra aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp24x2ra_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp24x2ra.s2(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp24x2ra_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp24x2ra_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulfp24x2ra_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2ras_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2ras_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2ras_h: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x16x2ras.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2ras_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h.s2(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2ras_h_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2ras_h_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulfp32x16x2ras.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2ras_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2ras_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2ras_l: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x16x2ras.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret 
<2 x i32> %ret +} + + +;--- ae_mulfp32x16x2ras_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l.s2(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2ras_l_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2ras_l_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulfp32x16x2ras.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2rs_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2rs_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2rs_h: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x16x2rs.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2rs_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h.s2(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2rs_h_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2rs_h_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulfp32x16x2rs.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2rs_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2rs_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2rs_l: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x16x2rs.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x16x2rs_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l.s2(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulfp32x16x2rs_l_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x16x2rs_l_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulfp32x16x2rs.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x2ras.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x2ras(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x2ras: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x2ras aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfp32x2rs.ll +declare <2 x i32> @llvm.xtensa.ae.mulfp32x2rs(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulfp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfp32x2rs: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulfp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulfp32x2rs aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulfq32sp24s_h_s2.ll +declare <1 x i64> 
@llvm.xtensa.ae.mulfq32sp24s.h.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulfq32sp24s_h_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfq32sp24s_h_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.h.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulfq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulfq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulfq32sp24s_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulfq32sp24s_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulfq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulp24x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulp24x2(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulp24x2(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulp24x2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulp24x2(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulp24x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulp24x2_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulp24x2.s2(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulp24x2_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulp24x2_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulp24x2.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulp24x2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulp32x16x2_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulp32x16x2.h(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulp32x16x2_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulp32x16x2_h: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulp32x16x2.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulp32x16x2_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulp32x16x2.l(<2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulp32x16x2_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulp32x16x2_l: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulp32x16x2.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulp32x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulp32x2(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulp32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulp32x2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulp32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulp32x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulq32sp16s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulq32sp16s.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulq32sp16s_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulq32sp16s_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulq32sp16s.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x 
i32> %ae_mul_S2_d1) +; CHECK: ae_mulq32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulq32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulq32sp16u.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulq32sp16u_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulq32sp16u_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulq32sp16u.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulq32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulrfq32sp24s_h_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.h.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulrfq32sp24s_h_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulrfq32sp24s_h_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.h.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulrfq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulrfq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.l.s2(<1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulrfq32sp24s_l_s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulrfq32sp24s_l_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.l.s2(<1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulrfq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls16x4.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.muls16x4(<2 x i32>, <2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_muls16x4(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_muls16x4: + + + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.muls16x4(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: ae_muls16x4 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_muls32_hh.ll +declare <1 x i64> @llvm.xtensa.ae.muls32.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32_lh.ll +declare <1 x i64> @llvm.xtensa.ae.muls32.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32_ll.ll +declare <1 x i64> @llvm.xtensa.ae.muls32.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; 
CHECK-LABEL: test_xtensa_ae_muls32_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_hh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32f48p16s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_hh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_hh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_hh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_muls32f48p16s.hh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32f48p16s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_muls32f48p16s.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32f48p16s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32f48p16s_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32f48p16s_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32f48p16s_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_muls32f48p16s.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32u_ll.ll +declare <1 x i64> @llvm.xtensa.ae.muls32u.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_muls32u_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> 
%opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32u_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32u.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32u.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; 
CHECK: ae_muls32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.h3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_h3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_h3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.h3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> 
@test_xtensa_ae_muls32x16_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_muls32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_muls32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.muls32x16.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_muls32x16_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_muls32x16_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.muls32x16.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_muls32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsad24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulsad24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsad24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsad24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsad32x16_h1_l0(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: 
ae_mulsad32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsad32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsad32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsad32x16_h3_l2(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsad32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsad32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsad32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsad32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsad32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsafd24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulsafd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsafd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsafd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsafd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsafd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> 
@test_xtensa_ae_mulsafd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsafd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsafd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsafd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsafd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsafd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsafd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsafd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf16ss_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.00(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_00: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf16ss.00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.00.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_00_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_00_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf16ss.00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_10.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.10(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_10(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_10: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.10(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.10 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_11.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.11(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_11(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_11: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.11(<2 x i32> %ae_mul_q0, <4 x i16> 
%ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.11 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_20.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.20(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_20(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_20: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.20(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.20 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_21.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.21(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_21(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_21: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.21(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.21 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.22(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_22(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_22: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.22(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_30.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.30(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_30(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_30: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.30(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.30 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_31.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.31(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_31(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_31: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.31(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.31 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_32.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.32(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_32(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_32: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.32(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsf16ss_33.ll +declare <2 x i32> @llvm.xtensa.ae.mulsf16ss.33(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16ss_33(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf16ss_33: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsf16ss.33(<2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulsf16ss.33 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + 
+ +;--- ae_mulsf16x4ss.ll +declare { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulsf16x4ss(<2 x i32>, <2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsf16x4ss(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) { +; CHECK-LABEL: test_xtensa_ae_mulsf16x4ss: + + + + +%ret = call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulsf16x4ss(<2 x i32> %ae_mul_q1, <2 x i32> %ae_mul_q0, <4 x i16> %ae_mul_d1, <4 x i16> %ae_mul_d0) +; CHECK: ae_mulsf16x4ss aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} +%ev = extractvalue { <2 x i32>, <2 x i32> } %ret, 0 +ret <2 x i32> %ev +} + + +;--- ae_mulsf32r_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32r.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32r_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32r_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32r.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32r.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32r_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32r.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32r_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32r_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32r.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32r.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32r_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32r.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32r_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32r_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32r.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32r_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32r.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32r_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32r_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsf32r.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32s.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32s_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32s_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32s.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32s.lh(<1 x i64>, <2 x i32>, <2 x i32>) 
+define <1 x i64> @test_xtensa_ae_mulsf32s_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32s_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32s.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32s.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf32s_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32s_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32s.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h0.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.h0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.h0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h1.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.h1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.h1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; 
CHECK-LABEL: test_xtensa_ae_mulsf32x16_h2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.h2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.h2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h3.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.h3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_h3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_h3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_h3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.h3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l1: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> 
%opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l1_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l1_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l3: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf32x16.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf32x16_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulsf32x16_l3_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf32x16_l3_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsf32x16.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf48q32sp16s_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf48q32sp16s_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf48q32sp16s_l: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf48q32sp16s.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf48q32sp16s_l_s2.ll +declare <1 x i64> 
@llvm.xtensa.ae.mulsf48q32sp16s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf48q32sp16s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf48q32sp16s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsf48q32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf48q32sp16u_l.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf48q32sp16u_l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf48q32sp16u_l: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <1 x i64> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsf48q32sp16u.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsf48q32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsf48q32sp16u_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsf48q32sp16u_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsf48q32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsfp24x2r.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp24x2r(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp24x2r: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp24x2r aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp24x2r_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp24x2r.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp24x2r_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp24x2r_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsfp24x2r_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp24x2ra.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp24x2ra: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp24x2ra aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp24x2ra_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp24x2ra_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x 
i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp24x2ra_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsfp24x2ra_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2ras_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2ras_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2ras_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x16x2ras.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2ras_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2ras_h_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2ras_h_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsfp32x16x2ras.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2ras_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2ras_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2ras_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x16x2ras.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2ras_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2ras_l_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2ras_l_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsfp32x16x2ras.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2rs_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2rs_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2rs_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x16x2rs.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2rs_h_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2rs_h_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: 
test_xtensa_ae_mulsfp32x16x2rs_h_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsfp32x16x2rs.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2rs_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2rs_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2rs_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x16x2rs.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x16x2rs_l_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l.s2(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsfp32x16x2rs_l_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x16x2rs_l_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulsfp32x16x2rs.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x2ras.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x2ras(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x2ras: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x2ras(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x2ras aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfp32x2rs.ll +declare <2 x i32> @llvm.xtensa.ae.mulsfp32x2rs(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsfp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfp32x2rs: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsfp32x2rs(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsfp32x2rs aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsfq32sp24s_h_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.h.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsfq32sp24s_h_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfq32sp24s_h_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.h.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsfq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsfq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsfq32sp24s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsfq32sp24s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> 
%ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsfq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsp24x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsp24x2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsp24x2(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsp24x2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsp24x2(<2 x i32> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulsp24x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsp24x2_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsp24x2.s2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsp24x2_s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsp24x2_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsp24x2.s2(<2 x i32> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsp24x2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsp32x16x2_h.ll +declare <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.h(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsp32x16x2_h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsp32x16x2_h: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.h(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsp32x16x2.h aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsp32x16x2_l.ll +declare <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.l(<2 x i32>, <2 x i32>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulsp32x16x2_l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsp32x16x2_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.l(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsp32x16x2.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsp32x2.ll +declare <2 x i32> @llvm.xtensa.ae.mulsp32x2(<2 x i32>, <2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_mulsp32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsp32x2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulsp32x2(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulsp32x2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulsq32sp16s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsq32sp16s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsq32sp16s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsq32sp16s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsq32sp16s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsq32sp16s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsq32sp16u_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsq32sp16u.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> 
@test_xtensa_ae_mulsq32sp16u_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsq32sp16u_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsq32sp16u.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsq32sp16u.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsrfq32sp24s_h_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.h.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsrfq32sp24s_h_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsrfq32sp24s_h_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.h.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsrfq32sp24s.h_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulsrfq32sp24s_l_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.l.s2(<1 x i64>, <1 x i64>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulsrfq32sp24s_l_s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulsrfq32sp24s_l_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.l.s2(<1 x i64> %ae_mul_S2_q0, <1 x i64> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulsrfq32sp24s.l_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_hh.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulss32f48p16s_hh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulss32f48p16s.hh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_hh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_hh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulss32f48p16s_hh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulss32f48p16s.hh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulss32f48p16s_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulss32f48p16s.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: 
test_xtensa_ae_mulss32f48p16s_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulss32f48p16s.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulss32f48p16s_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulss32f48p16s.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulss32f48p16s_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulss32f48p16s_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulss32f48p16s_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulss32f48p16s.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssd24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulssd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulssd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssd24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulssd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssd24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulssd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd32x16_h1_l0.ll 
+declare <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd16ss_11_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_11_00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_11_00: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssfd16ss.11_00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd16ss_11_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_11_00_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_11_00_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssfd16ss.11_00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd16ss_13_02.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02(<2 x 
i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_13_02(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_13_02: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssfd16ss.13_02 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd16ss_13_02_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_13_02_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_13_02_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssfd16ss.13_02_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd16ss_33_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_33_22(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_33_22: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22(<2 x i32> %opnd_ae_sem_mul_x2_S1_q0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssfd16ss.33_22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd16ss_33_22_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22.s2(<2 x i32>, <4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulssfd16ss_33_22_s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd16ss_33_22_s2: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22.s2(<2 x i32> %ae_mul_S2_q0, <4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssfd16ss.33_22_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulssfd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssfd24_hh_ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd24_hh_ll: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulssfd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssfd24_hh_ll_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd24_hh_ll_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulssfd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssfd24_hl_lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: 
test_xtensa_ae_mulssfd24_hl_lh: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh(<1 x i64> %ae_mul_q0, <2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulssfd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh.s2(<1 x i64>, <2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulssfd24_hl_lh_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd24_hl_lh_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulssfd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssfd32x16_h1_l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd32x16_h1_l0: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssfd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssfd32x16_h1_l0_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd32x16_h1_l0_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssfd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssfd32x16_h3_l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd32x16_h3_l2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2(<1 x i64> %opnd_ae_sem_mul_x2_S1_q0, <2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulssfd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulssfd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2.s2(<1 x i64>, <2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulssfd32x16_h3_l2_s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulssfd32x16_h3_l2_s2: + + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2.s2(<1 x i64> %ae_mul_S2_q0, <2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulssfd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaad24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzaad24.hh.ll aed{{[0-9]+}}, 
aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaad24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzaad24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaad24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzaad24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaad24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzaad24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h0_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h0_l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h0_l1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaad32x16.h0.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h0_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h0_l1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h0_l1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaad32x16.h0.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaad32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaad32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h2_l3.ll 
+declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h2_l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h2_l3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaad32x16.h2.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h2_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h2_l3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h2_l3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaad32x16.h2.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaad32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaad32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaad32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaad32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaad32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd16ss_11_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_11_00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_11_00: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd16ss.11_00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzaafd16ss_11_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_11_00_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_11_00_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd16ss.11_00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzaafd16ss_13_02.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_13_02(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_13_02: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd16ss.13_02 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret 
<2 x i32> %ret +} + + +;--- ae_mulzaafd16ss_13_02_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_13_02_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_13_02_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd16ss.13_02_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzaafd16ss_33_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_33_22(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_33_22: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd16ss.33_22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzaafd16ss_33_22_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzaafd16ss_33_22_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd16ss_33_22_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd16ss.33_22_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzaafd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaafd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzaafd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaafd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaafd24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzaafd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzaafd24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h0_l1.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1(<2 x i32>, <4 x i16>) +define <1 x 
i64> @test_xtensa_ae_mulzaafd32x16_h0_l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h0_l1: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd32x16.h0.l1 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h0_l1_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h0_l1_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h0_l1_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd32x16.h0.l1_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h2_l3.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h2_l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h2_l3: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd32x16.h2.l3 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h2_l3_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h2_l3_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h2_l3_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd32x16.h2.l3_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzaafd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzaafd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- 
ae_mulzaafd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzaafd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzaafd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzaafd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzasd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzasd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasd24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzasd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasd24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzasd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzasd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzasd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasd32x16_h3_l2(<2 x i32> 
%opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzasd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzasd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasfd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzasfd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasfd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzasfd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasfd24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzasfd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzasfd24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzasfd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasfd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzasfd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasfd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd32x16_h1_l0_s2: + 
+ +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzasfd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasfd32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzasfd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzasfd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzasfd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzasfd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzasfd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzsad24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzsad24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzsad24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzsad24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsad32x16_h1_l0(<2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0(<2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulzsad32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsad32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzsad32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsad32x16_h3_l2(<2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2(<2 x i32> %ae_mul_d0, <4 x i16> %ae_mul_d1) +; CHECK: ae_mulzsad32x16.h3.l2 
aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsad32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsad32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsad32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzsad32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzsafd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzsafd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzsafd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzsafd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsafd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzsafd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsafd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzsafd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsafd32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzsafd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzsafd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzsafd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzsafd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzsafd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, 
aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd24_hh_ll.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzssd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzssd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssd24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzssd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssd24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzssd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssd32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2.s2(<2 x 
i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd16ss_11_00.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_11_00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_11_00: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssfd16ss.11_00 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd16ss_11_00_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_11_00_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_11_00_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd16ss.11_00_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd16ss_13_02.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_13_02(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_13_02: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssfd16ss.13_02 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd16ss_13_02_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_13_02_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_13_02_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd16ss.13_02_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd16ss_33_22.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_33_22(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_33_22: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22(<4 x i16> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssfd16ss.33_22 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd16ss_33_22_s2.ll +declare <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22.s2(<4 x i16>, <4 x i16>) +define <2 x i32> @test_xtensa_ae_mulzssfd16ss_33_22_s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd16ss_33_22_s2: + + +%ret = call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22.s2(<4 x i16> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd16ss.33_22_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_mulzssfd24_hh_ll.ll +declare <1 x i64> 
@llvm.xtensa.ae.mulzssfd24.hh.ll(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssfd24_hh_ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd24_hh_ll: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzssfd24.hh.ll aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd24_hh_ll_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssfd24_hh_ll_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd24_hh_ll_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd24.hh.ll_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd24_hl_lh.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssfd24_hl_lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd24_hl_lh: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh(<2 x i32> %ae_mul_d0, <2 x i32> %ae_mul_d1) +; CHECK: ae_mulzssfd24.hl.lh aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd24_hl_lh_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh.s2(<2 x i32>, <2 x i32>) +define <1 x i64> @test_xtensa_ae_mulzssfd24_hl_lh_s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd24_hl_lh_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh.s2(<2 x i32> %ae_mul_S2_d0, <2 x i32> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd24.hl.lh_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd32x16_h1_l0.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssfd32x16_h1_l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd32x16_h1_l0: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssfd32x16.h1.l0 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd32x16_h1_l0_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0.s2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssfd32x16_h1_l0_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd32x16_h1_l0_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd32x16.h1.l0_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd32x16_h3_l2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2(<2 x i32>, <4 x i16>) +define <1 x i64> @test_xtensa_ae_mulzssfd32x16_h3_l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd32x16_h3_l2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2(<2 x i32> %opnd_ae_sem_mul_x2_S1_d0, <4 x i16> %opnd_ae_sem_mul_x2_S1_d1) +; CHECK: ae_mulzssfd32x16.h3.l2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_mulzssfd32x16_h3_l2_s2.ll +declare <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2.s2(<2 x i32>, <4 x i16>) +define <1 x i64> 
@test_xtensa_ae_mulzssfd32x16_h3_l2_s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) { +; CHECK-LABEL: test_xtensa_ae_mulzssfd32x16_h3_l2_s2: + + +%ret = call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2.s2(<2 x i32> %ae_mul_S2_d0, <4 x i16> %ae_mul_S2_d1) +; CHECK: ae_mulzssfd32x16.h3.l2_s2 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_nand.ll +declare <1 x i64> @llvm.xtensa.ae.nand(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_nand(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_nand: + + +%ret = call <1 x i64> @llvm.xtensa.ae.nand(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) +; CHECK: ae_nand aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_neg16s.ll +declare <4 x i16> @llvm.xtensa.ae.neg16s(<4 x i16>) +define <4 x i16> @test_xtensa_ae_neg16s(<4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg16s: + +%ret = call <4 x i16> @llvm.xtensa.ae.neg16s(<4 x i16> %ae_arth_v1) +; CHECK: ae_neg16s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_neg24s.ll +declare <2 x i32> @llvm.xtensa.ae.neg24s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_neg24s(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg24s: + +%ret = call <2 x i32> @llvm.xtensa.ae.neg24s(<2 x i32> %ae_arth_v1) +; CHECK: ae_neg24s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_neg32.ll +declare <2 x i32> @llvm.xtensa.ae.neg32(<2 x i32>) +define <2 x i32> @test_xtensa_ae_neg32(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg32: + +%ret = call <2 x i32> @llvm.xtensa.ae.neg32(<2 x i32> %ae_arth_v1) +; CHECK: ae_neg32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_neg32s.ll +declare <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_neg32s(<2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg32s: + +%ret = call <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32> %ae_arth_v1) +; CHECK: ae_neg32s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_neg64.ll +declare <1 x i64> @llvm.xtensa.ae.neg64(<1 x i64>) +define <1 x i64> @test_xtensa_ae_neg64(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg64: + +%ret = call <1 x i64> @llvm.xtensa.ae.neg64(<1 x i64> %ae_arth_v1) +; CHECK: ae_neg64 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_neg64s.ll +declare <1 x i64> @llvm.xtensa.ae.neg64s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_neg64s(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_neg64s: + +%ret = call <1 x i64> @llvm.xtensa.ae.neg64s(<1 x i64> %ae_arth_v1) +; CHECK: ae_neg64s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_nsa64.ll +declare i32 @llvm.xtensa.ae.nsa64(<1 x i64>) +define i32 @test_xtensa_ae_nsa64(<1 x i64> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_nsa64: + +%ret = call i32 @llvm.xtensa.ae.nsa64(<1 x i64> %ae_dr_to_ar_v0) +; CHECK: ae_nsa64 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_nsaz16_0.ll +declare i32 @llvm.xtensa.ae.nsaz16.0(<4 x i16>) +define i32 @test_xtensa_ae_nsaz16_0(<4 x i16> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_nsaz16_0: + +%ret = call i32 @llvm.xtensa.ae.nsaz16.0(<4 x i16> %ae_dr_to_ar_v0) +; CHECK: ae_nsaz16.0 a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_nsaz32_l.ll +declare i32 @llvm.xtensa.ae.nsaz32.l(<2 x i32>) +define i32 @test_xtensa_ae_nsaz32_l(<2 x i32> %ae_dr_to_ar_v0) { +; CHECK-LABEL: test_xtensa_ae_nsaz32_l: + +%ret = call i32 
@llvm.xtensa.ae.nsaz32.l(<2 x i32> %ae_dr_to_ar_v0) +; CHECK: ae_nsaz32.l a{{[0-9]+}}, aed{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_or.ll +declare <1 x i64> @llvm.xtensa.ae.or(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_or(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_or: + + +%ret = call <1 x i64> @llvm.xtensa.ae.or(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) +; CHECK: ae_or aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_pksr24.ll +declare <2 x i32> @llvm.xtensa.ae.pksr24(<2 x i32>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_pksr24(<2 x i32> %ae_pks_d, <1 x i64> %ae_pks_s) { +; CHECK-LABEL: test_xtensa_ae_pksr24: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.pksr24(<2 x i32> %ae_pks_d, <1 x i64> %ae_pks_s, i32 0) +; CHECK: ae_pksr24 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_pksr32.ll +declare <2 x i32> @llvm.xtensa.ae.pksr32(<2 x i32>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_pksr32(<2 x i32> %ae_pks_d, <1 x i64> %ae_pks_s) { +; CHECK-LABEL: test_xtensa_ae_pksr32: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.pksr32(<2 x i32> %ae_pks_d, <1 x i64> %ae_pks_s, i32 0) +; CHECK: ae_pksr32 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round16x4f32sasym.ll +declare <4 x i16> @llvm.xtensa.ae.round16x4f32sasym(<2 x i32>, <2 x i32>) +define <4 x i16> @test_xtensa_ae_round16x4f32sasym(<2 x i32> %ae_arth_v1, <2 x i32> %ae_arth_v0) { +; CHECK-LABEL: test_xtensa_ae_round16x4f32sasym: + + +%ret = call <4 x i16> @llvm.xtensa.ae.round16x4f32sasym(<2 x i32> %ae_arth_v1, <2 x i32> %ae_arth_v0) +; CHECK: ae_round16x4f32sasym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_round16x4f32ssym.ll +declare <4 x i16> @llvm.xtensa.ae.round16x4f32ssym(<2 x i32>, <2 x i32>) +define <4 x i16> @test_xtensa_ae_round16x4f32ssym(<2 x i32> %ae_arth_v1, <2 x i32> %ae_arth_v0) { +; CHECK-LABEL: test_xtensa_ae_round16x4f32ssym: + + +%ret = call <4 x i16> @llvm.xtensa.ae.round16x4f32ssym(<2 x i32> %ae_arth_v1, <2 x i32> %ae_arth_v0) +; CHECK: ae_round16x4f32ssym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_round24x2f48sasym.ll +declare <2 x i32> @llvm.xtensa.ae.round24x2f48sasym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round24x2f48sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round24x2f48sasym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round24x2f48sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_round24x2f48sasym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round24x2f48ssym.ll +declare <2 x i32> @llvm.xtensa.ae.round24x2f48ssym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round24x2f48ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round24x2f48ssym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round24x2f48ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_round24x2f48ssym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round32x2f48sasym.ll +declare <2 x i32> @llvm.xtensa.ae.round32x2f48sasym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round32x2f48sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round32x2f48sasym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round32x2f48sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: 
ae_round32x2f48sasym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round32x2f48ssym.ll +declare <2 x i32> @llvm.xtensa.ae.round32x2f48ssym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round32x2f48ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round32x2f48ssym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round32x2f48ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_round32x2f48ssym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round32x2f64sasym.ll +declare <2 x i32> @llvm.xtensa.ae.round32x2f64sasym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round32x2f64sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round32x2f64sasym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round32x2f64sasym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_round32x2f64sasym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_round32x2f64ssym.ll +declare <2 x i32> @llvm.xtensa.ae.round32x2f64ssym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_round32x2f64ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_round32x2f64ssym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.round32x2f64ssym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_round32x2f64ssym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_roundsp16f24asym.ll +declare <2 x i32> @llvm.xtensa.ae.roundsp16f24asym(<2 x i32>) +define <2 x i32> @test_xtensa_ae_roundsp16f24asym(<2 x i32> %ae_arth_v0) { +; CHECK-LABEL: test_xtensa_ae_roundsp16f24asym: + +%ret = call <2 x i32> @llvm.xtensa.ae.roundsp16f24asym(<2 x i32> %ae_arth_v0) +; CHECK: ae_roundsp16f24asym aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_roundsp16f24sym.ll +declare <2 x i32> @llvm.xtensa.ae.roundsp16f24sym(<2 x i32>) +define <2 x i32> @test_xtensa_ae_roundsp16f24sym(<2 x i32> %ae_arth_v0) { +; CHECK-LABEL: test_xtensa_ae_roundsp16f24sym: + +%ret = call <2 x i32> @llvm.xtensa.ae.roundsp16f24sym(<2 x i32> %ae_arth_v0) +; CHECK: ae_roundsp16f24sym aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_roundsp16q48x2asym.ll +declare <2 x i32> @llvm.xtensa.ae.roundsp16q48x2asym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_roundsp16q48x2asym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_roundsp16q48x2asym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2asym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_roundsp16q48x2asym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_roundsp16q48x2sym.ll +declare <2 x i32> @llvm.xtensa.ae.roundsp16q48x2sym(<1 x i64>, <1 x i64>) +define <2 x i32> @test_xtensa_ae_roundsp16q48x2sym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_roundsp16q48x2sym: + + +%ret = call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2sym(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_roundsp16q48x2sym aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_roundsq32f48asym.ll +declare <1 x i64> @llvm.xtensa.ae.roundsq32f48asym(<1 x i64>) +define <1 x i64> @test_xtensa_ae_roundsq32f48asym(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_roundsq32f48asym: + +%ret = call <1 x i64> @llvm.xtensa.ae.roundsq32f48asym(<1 x i64> %ae_arth_v1) +; CHECK: ae_roundsq32f48asym aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x 
i64> %ret +} + + +;--- ae_roundsq32f48sym.ll +declare <1 x i64> @llvm.xtensa.ae.roundsq32f48sym(<1 x i64>) +define <1 x i64> @test_xtensa_ae_roundsq32f48sym(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_roundsq32f48sym: + +%ret = call <1 x i64> @llvm.xtensa.ae.roundsq32f48sym(<1 x i64> %ae_arth_v1) +; CHECK: ae_roundsq32f48sym aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_s16_0_i.ll +declare void @llvm.xtensa.ae.s16.0.i(<4 x i16>, ptr, i32) +define void @test_xtensa_ae_s16_0_i(<4 x i16> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16_0_i: + + + + call void @llvm.xtensa.ae.s16.0.i(<4 x i16> %ae_ls_v, ptr %ars, i32 -16) +; CHECK: ae_s16.0.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s16_0_ip.ll +declare ptr @llvm.xtensa.ae.s16.0.ip(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16_0_ip(<4 x i16> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16_0_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s16.0.ip(<4 x i16> %ae_ls_v, ptr %ars, i32 -16) +; CHECK: ae_s16.0.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16_0_x.ll +declare void @llvm.xtensa.ae.s16.0.x(<4 x i16>, ptr, i32) +define void @test_xtensa_ae_s16_0_x(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16_0_x: + + + + call void @llvm.xtensa.ae.s16.0.x(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16.0.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s16_0_xc.ll +declare ptr @llvm.xtensa.ae.s16.0.xc(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16_0_xc(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16_0_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s16.0.xc(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16.0.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16_0_xp.ll +declare ptr @llvm.xtensa.ae.s16.0.xp(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16_0_xp(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16_0_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s16.0.xp(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16.0.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16m_l_i.ll +declare void @llvm.xtensa.ae.s16m.l.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s16m_l_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16m_l_i: + + + + call void @llvm.xtensa.ae.s16m.l.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -16) +; CHECK: ae_s16m.l.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s16m_l_iu.ll +declare ptr @llvm.xtensa.ae.s16m.l.iu(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16m_l_iu(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16m_l_iu: + + + +%ret = call ptr @llvm.xtensa.ae.s16m.l.iu(<2 x i32> %ae_ls_v, ptr %ars, i32 -16) +; CHECK: ae_s16m.l.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16m_l_x.ll +declare void @llvm.xtensa.ae.s16m.l.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s16m_l_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16m_l_x: + + + + call void @llvm.xtensa.ae.s16m.l.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16m.l.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s16m_l_xc.ll +declare ptr @llvm.xtensa.ae.s16m.l.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16m_l_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16m_l_xc: + + + +%ret = call ptr 
@llvm.xtensa.ae.s16m.l.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16m.l.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16m_l_xu.ll +declare ptr @llvm.xtensa.ae.s16m.l.xu(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16m_l_xu(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16m_l_xu: + + + +%ret = call ptr @llvm.xtensa.ae.s16m.l.xu(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16m.l.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x2m_i.ll +declare void @llvm.xtensa.ae.s16x2m.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s16x2m_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x2m_i: + + + + call void @llvm.xtensa.ae.s16x2m.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s16x2m.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s16x2m_iu.ll +declare ptr @llvm.xtensa.ae.s16x2m.iu(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16x2m_iu(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x2m_iu: + + + +%ret = call ptr @llvm.xtensa.ae.s16x2m.iu(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s16x2m.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x2m_x.ll +declare void @llvm.xtensa.ae.s16x2m.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s16x2m_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x2m_x: + + + + call void @llvm.xtensa.ae.s16x2m.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x2m.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s16x2m_xc.ll +declare ptr @llvm.xtensa.ae.s16x2m.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16x2m_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x2m_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s16x2m.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x2m.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x2m_xu.ll +declare ptr @llvm.xtensa.ae.s16x2m.xu(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s16x2m_xu(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x2m_xu: + + + +%ret = call ptr @llvm.xtensa.ae.s16x2m.xu(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x2m.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x4_i.ll +declare void @llvm.xtensa.ae.s16x4.i(<4 x i16>, ptr, i32) +define void @test_xtensa_ae_s16x4_i(<4 x i16> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x4_i: + + + + call void @llvm.xtensa.ae.s16x4.i(<4 x i16> %ae_ls_v, ptr %ars, i32 -64) +; CHECK: ae_s16x4.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s16x4_ip.ll +declare ptr @llvm.xtensa.ae.s16x4.ip(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16x4_ip(<4 x i16> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x4_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s16x4.ip(<4 x i16> %ae_ls_v, ptr %ars, i32 0) +; CHECK: ae_s16x4.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x4_ric.ll +declare ptr @llvm.xtensa.ae.s16x4.ric(<4 x i16>, ptr) +define ptr @test_xtensa_ae_s16x4_ric(<4 x i16> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x4_ric: + + +%ret = call ptr @llvm.xtensa.ae.s16x4.ric(<4 x i16> %ae_ls_v, ptr %ars) +; CHECK: ae_s16x4.ric aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x4_rip.ll +declare ptr @llvm.xtensa.ae.s16x4.rip(<4 x i16>, ptr) +define ptr @test_xtensa_ae_s16x4_rip(<4 x i16> 
%ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s16x4_rip: + + +%ret = call ptr @llvm.xtensa.ae.s16x4.rip(<4 x i16> %ae_ls_v, ptr %ars) +; CHECK: ae_s16x4.rip aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x4_x.ll +declare void @llvm.xtensa.ae.s16x4.x(<4 x i16>, ptr, i32) +define void @test_xtensa_ae_s16x4_x(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x4_x: + + + + call void @llvm.xtensa.ae.s16x4.x(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x4.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s16x4_xc.ll +declare ptr @llvm.xtensa.ae.s16x4.xc(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16x4_xc(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x4_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s16x4.xc(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x4.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s16x4_xp.ll +declare ptr @llvm.xtensa.ae.s16x4.xp(<4 x i16>, ptr, i32) +define ptr @test_xtensa_ae_s16x4_xp(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s16x4_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s16x4.xp(<4 x i16> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s16x4.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s24ra64s_i.ll +declare void @llvm.xtensa.ae.s24ra64s.i(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s24ra64s_i(<1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s24ra64s_i: + + + + call void @llvm.xtensa.ae.s24ra64s.i(<1 x i64> %ae_ls_v1, ptr %ars, i32 -32) +; CHECK: ae_s24ra64s.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s24ra64s_ip.ll +declare ptr @llvm.xtensa.ae.s24ra64s.ip(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s24ra64s_ip(<1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s24ra64s_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s24ra64s.ip(<1 x i64> %ae_ls_v1, ptr %ars, i32 -32) +; CHECK: ae_s24ra64s.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s24ra64s_x.ll +declare void @llvm.xtensa.ae.s24ra64s.x(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s24ra64s_x(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s24ra64s_x: + + + + call void @llvm.xtensa.ae.s24ra64s.x(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s24ra64s.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s24ra64s_xc.ll +declare ptr @llvm.xtensa.ae.s24ra64s.xc(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s24ra64s_xc(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s24ra64s_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s24ra64s.xc(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s24ra64s.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s24ra64s_xp.ll +declare ptr @llvm.xtensa.ae.s24ra64s.xp(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s24ra64s_xp(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s24ra64s_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s24ra64s.xp(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s24ra64s.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s24x2ra64s_ip.ll +declare ptr @llvm.xtensa.ae.s24x2ra64s.ip(<1 x i64>, <1 x i64>, ptr) +define ptr @test_xtensa_ae_s24x2ra64s_ip(<1 x i64> %ae_ls_v2, <1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s24x2ra64s_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s24x2ra64s.ip(<1 x i64> %ae_ls_v2, <1 
x i64> %ae_ls_v1, ptr %ars) +; CHECK: ae_s24x2ra64s.ip aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32_l_i.ll +declare void @llvm.xtensa.ae.s32.l.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32_l_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32_l_i: + + + + call void @llvm.xtensa.ae.s32.l.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32.l.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32_l_ip.ll +declare ptr @llvm.xtensa.ae.s32.l.ip(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32_l_ip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32_l_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s32.l.ip(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32.l.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32_l_x.ll +declare void @llvm.xtensa.ae.s32.l.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32_l_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32_l_x: + + + + call void @llvm.xtensa.ae.s32.l.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32.l.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32_l_xc.ll +declare ptr @llvm.xtensa.ae.s32.l.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32_l_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32_l_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32.l.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32.l.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32_l_xp.ll +declare ptr @llvm.xtensa.ae.s32.l.xp(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32_l_xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32_l_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s32.l.xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32.l.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32f24_l_i.ll +declare void @llvm.xtensa.ae.s32f24.l.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32f24_l_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32f24_l_i: + + + + call void @llvm.xtensa.ae.s32f24.l.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32f24.l.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32f24_l_ip.ll +declare ptr @llvm.xtensa.ae.s32f24.l.ip(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32f24_l_ip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32f24_l_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s32f24.l.ip(<2 x i32> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32f24.l.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32f24_l_x.ll +declare void @llvm.xtensa.ae.s32f24.l.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32f24_l_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32f24_l_x: + + + + call void @llvm.xtensa.ae.s32f24.l.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32f24.l.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32f24_l_xc.ll +declare ptr @llvm.xtensa.ae.s32f24.l.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32f24_l_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32f24_l_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32f24.l.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32f24.l.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32f24_l_xp.ll +declare ptr @llvm.xtensa.ae.s32f24.l.xp(<2 x i32>, ptr, i32) +define ptr 
@test_xtensa_ae_s32f24_l_xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32f24_l_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s32f24.l.xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32f24.l.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32m_i.ll +declare void @llvm.xtensa.ae.s32m.i(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s32m_i(<1 x i64> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32m_i: + + + + call void @llvm.xtensa.ae.s32m.i(<1 x i64> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32m.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32m_iu.ll +declare ptr @llvm.xtensa.ae.s32m.iu(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32m_iu(<1 x i64> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32m_iu: + + + +%ret = call ptr @llvm.xtensa.ae.s32m.iu(<1 x i64> %ae_ls_v, ptr %ars, i32 -32) +; CHECK: ae_s32m.iu aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32m_x.ll +declare void @llvm.xtensa.ae.s32m.x(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s32m_x(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32m_x: + + + + call void @llvm.xtensa.ae.s32m.x(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32m.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32m_xc.ll +declare ptr @llvm.xtensa.ae.s32m.xc(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32m_xc(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32m_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32m.xc(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32m.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32m_xu.ll +declare ptr @llvm.xtensa.ae.s32m.xu(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32m_xu(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32m_xu: + + + +%ret = call ptr @llvm.xtensa.ae.s32m.xu(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32m.xu aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32ra64s_i.ll +declare void @llvm.xtensa.ae.s32ra64s.i(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s32ra64s_i(<1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32ra64s_i: + + + + call void @llvm.xtensa.ae.s32ra64s.i(<1 x i64> %ae_ls_v1, ptr %ars, i32 -32) +; CHECK: ae_s32ra64s.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32ra64s_ip.ll +declare ptr @llvm.xtensa.ae.s32ra64s.ip(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32ra64s_ip(<1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32ra64s_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s32ra64s.ip(<1 x i64> %ae_ls_v1, ptr %ars, i32 -32) +; CHECK: ae_s32ra64s.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32ra64s_x.ll +declare void @llvm.xtensa.ae.s32ra64s.x(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s32ra64s_x(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32ra64s_x: + + + + call void @llvm.xtensa.ae.s32ra64s.x(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s32ra64s.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32ra64s_xc.ll +declare ptr @llvm.xtensa.ae.s32ra64s.xc(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32ra64s_xc(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32ra64s_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32ra64s.xc(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s32ra64s.xc 
aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32ra64s_xp.ll +declare ptr @llvm.xtensa.ae.s32ra64s.xp(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s32ra64s_xp(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32ra64s_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s32ra64s.xp(<1 x i64> %ae_ls_v1, ptr %ars, i32 %art) +; CHECK: ae_s32ra64s.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2_i.ll +declare void @llvm.xtensa.ae.s32x2.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32x2_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2_i: + + + + call void @llvm.xtensa.ae.s32x2.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -64) +; CHECK: ae_s32x2.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32x2_ip.ll +declare ptr @llvm.xtensa.ae.s32x2.ip(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2_ip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2.ip(<2 x i32> %ae_ls_v, ptr %ars, i32 0) +; CHECK: ae_s32x2.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2_ric.ll +declare ptr @llvm.xtensa.ae.s32x2.ric(<2 x i32>, ptr) +define ptr @test_xtensa_ae_s32x2_ric(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2_ric: + + +%ret = call ptr @llvm.xtensa.ae.s32x2.ric(<2 x i32> %ae_ls_v, ptr %ars) +; CHECK: ae_s32x2.ric aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2_rip.ll +declare ptr @llvm.xtensa.ae.s32x2.rip(<2 x i32>, ptr) +define ptr @test_xtensa_ae_s32x2_rip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2_rip: + + +%ret = call ptr @llvm.xtensa.ae.s32x2.rip(<2 x i32> %ae_ls_v, ptr %ars) +; CHECK: ae_s32x2.rip aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2_x.ll +declare void @llvm.xtensa.ae.s32x2.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32x2_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2_x: + + + + call void @llvm.xtensa.ae.s32x2.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32x2_xc.ll +declare ptr @llvm.xtensa.ae.s32x2.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2_xp.ll +declare ptr @llvm.xtensa.ae.s32x2.xp(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2_xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2.xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2f24_i.ll +declare void @llvm.xtensa.ae.s32x2f24.i(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32x2f24_i(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_i: + + + + call void @llvm.xtensa.ae.s32x2f24.i(<2 x i32> %ae_ls_v, ptr %ars, i32 -64) +; CHECK: ae_s32x2f24.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s32x2f24_ip.ll +declare ptr @llvm.xtensa.ae.s32x2f24.ip(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2f24_ip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_ip: + + + +%ret = call ptr 
@llvm.xtensa.ae.s32x2f24.ip(<2 x i32> %ae_ls_v, ptr %ars, i32 0) +; CHECK: ae_s32x2f24.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2f24_ric.ll +declare ptr @llvm.xtensa.ae.s32x2f24.ric(<2 x i32>, ptr) +define ptr @test_xtensa_ae_s32x2f24_ric(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_ric: + + +%ret = call ptr @llvm.xtensa.ae.s32x2f24.ric(<2 x i32> %ae_ls_v, ptr %ars) +; CHECK: ae_s32x2f24.ric aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2f24_rip.ll +declare ptr @llvm.xtensa.ae.s32x2f24.rip(<2 x i32>, ptr) +define ptr @test_xtensa_ae_s32x2f24_rip(<2 x i32> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_rip: + + +%ret = call ptr @llvm.xtensa.ae.s32x2f24.rip(<2 x i32> %ae_ls_v, ptr %ars) +; CHECK: ae_s32x2f24.rip aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2f24_x.ll +declare void @llvm.xtensa.ae.s32x2f24.x(<2 x i32>, ptr, i32) +define void @test_xtensa_ae_s32x2f24_x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_x: + + + + call void @llvm.xtensa.ae.s32x2f24.x(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2f24.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s32x2f24_xc.ll +declare ptr @llvm.xtensa.ae.s32x2f24.xc(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2f24_xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2f24.xc(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2f24.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2f24_xp.ll +declare ptr @llvm.xtensa.ae.s32x2f24.xp(<2 x i32>, ptr, i32) +define ptr @test_xtensa_ae_s32x2f24_xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s32x2f24_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2f24.xp(<2 x i32> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s32x2f24.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s32x2ra64s_ip.ll +declare ptr @llvm.xtensa.ae.s32x2ra64s.ip(<1 x i64>, <1 x i64>, ptr) +define ptr @test_xtensa_ae_s32x2ra64s_ip(<1 x i64> %ae_ls_v2, <1 x i64> %ae_ls_v1, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s32x2ra64s_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s32x2ra64s.ip(<1 x i64> %ae_ls_v2, <1 x i64> %ae_ls_v1, ptr %ars) +; CHECK: ae_s32x2ra64s.ip aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s64_i.ll +declare void @llvm.xtensa.ae.s64.i(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s64_i(<1 x i64> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s64_i: + + + + call void @llvm.xtensa.ae.s64.i(<1 x i64> %ae_ls_v, ptr %ars, i32 -64) +; CHECK: ae_s64.i aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_s64_ip.ll +declare ptr @llvm.xtensa.ae.s64.ip(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s64_ip(<1 x i64> %ae_ls_v, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_s64_ip: + + + +%ret = call ptr @llvm.xtensa.ae.s64.ip(<1 x i64> %ae_ls_v, ptr %ars, i32 -64) +; CHECK: ae_s64.ip aed{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s64_x.ll +declare void @llvm.xtensa.ae.s64.x(<1 x i64>, ptr, i32) +define void @test_xtensa_ae_s64_x(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s64_x: + + + + call void @llvm.xtensa.ae.s64.x(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s64.x aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret void +} + + +;--- ae_s64_xc.ll +declare ptr 
@llvm.xtensa.ae.s64.xc(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s64_xc(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s64_xc: + + + +%ret = call ptr @llvm.xtensa.ae.s64.xc(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s64.xc aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_s64_xp.ll +declare ptr @llvm.xtensa.ae.s64.xp(<1 x i64>, ptr, i32) +define ptr @test_xtensa_ae_s64_xp(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_s64_xp: + + + +%ret = call ptr @llvm.xtensa.ae.s64.xp(<1 x i64> %ae_ls_v, ptr %ars, i32 %art) +; CHECK: ae_s64.xp aed{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sa16x4_ic.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ic(<4 x i16>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa16x4_ic(<4 x i16> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa16x4_ic: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ic(<4 x i16> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa16x4.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa16x4_ip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ip(<4 x i16>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa16x4_ip(<4 x i16> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa16x4_ip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ip(<4 x i16> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa16x4.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa16x4_ric.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ric(<4 x i16>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa16x4_ric(<4 x i16> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa16x4_ric: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ric(<4 x i16> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa16x4.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa16x4_rip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.rip(<4 x i16>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa16x4_rip(<4 x i16> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa16x4_rip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.rip(<4 x i16> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa16x4.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24_l_ic.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ic(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24_l_ic(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24_l_ic: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ic(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24.l.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = 
bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24_l_ip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24_l_ip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24_l_ip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24.l.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24_l_ric.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ric(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24_l_ric(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24_l_ric: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ric(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24.l.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24_l_rip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.rip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24_l_rip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24_l_rip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.rip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24.l.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24x2_ic.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ic(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24x2_ic(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24x2_ic: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ic(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24x2.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24x2_ip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24x2_ip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24x2_ip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24x2.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa24x2_ric.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ric(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24x2_ric(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24x2_ric: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ric(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24x2.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x 
i64> %unwrap +} + + +;--- ae_sa24x2_rip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.rip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa24x2_rip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa24x2_rip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.rip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa24x2.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2_ic.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ic(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2_ic(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2_ic: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ic(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2_ip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2_ip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2_ip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2_ric.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ric(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2_ric(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2_ric: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ric(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2_rip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.rip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2_rip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2_rip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.rip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2f24_ic.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ic(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2f24_ic(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2f24_ic: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ic(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2f24.ic aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- 
ae_sa32x2f24_ip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2f24_ip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2f24_ip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2f24.ip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2f24_ric.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ric(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2f24_ric(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2f24_ric: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ric(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2f24.ric aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa32x2f24_rip.ll +declare { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.rip(<2 x i32>, <8 x i8>, ptr) +define <1 x i64> @test_xtensa_ae_sa32x2f24_rip(<2 x i32> %ae_ls_v, <1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa32x2f24_rip: + +%wrap1 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.rip(<2 x i32> %ae_ls_v, <8 x i8> %wrap1, ptr %ars) +; CHECK: ae_sa32x2f24.rip aed{{[0-9]+}}, u{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <8 x i8>, ptr } %ret, 0 +%unwrap = bitcast <8 x i8> %ev to <1 x i64> +ret <1 x i64> %unwrap +} + + +;--- ae_sa64neg_fp.ll +declare <8 x i8> @llvm.xtensa.ae.sa64neg.fp(<8 x i8>, ptr) +define <8 x i8> @test_xtensa_ae_sa64neg_fp(<1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa64neg_fp: +%wrap0 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call <8 x i8> @llvm.xtensa.ae.sa64neg.fp(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_sa64neg.fp u{{[0-9]+}}, a{{[0-9]+}} + +ret <8 x i8> %ret +} + + +;--- ae_sa64pos_fp.ll +declare <8 x i8> @llvm.xtensa.ae.sa64pos.fp(<8 x i8>, ptr) +define <8 x i8> @test_xtensa_ae_sa64pos_fp(<1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sa64pos_fp: +%wrap0 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + +%ret = call <8 x i8> @llvm.xtensa.ae.sa64pos.fp(<8 x i8> %wrap0, ptr %ars) +; CHECK: ae_sa64pos.fp u{{[0-9]+}}, a{{[0-9]+}} + +ret <8 x i8> %ret +} + + +;--- ae_salign64_i.ll +declare void @llvm.xtensa.ae.salign64.i(<8 x i8>, ptr, i32) +define void @test_xtensa_ae_salign64_i(<1 x i64> %ae_ls_su, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_salign64_i: +%wrap0 = bitcast <1 x i64> %ae_ls_su to <8 x i8> + + + call void @llvm.xtensa.ae.salign64.i(<8 x i8> %wrap0, ptr %ars, i32 -64) +; CHECK: ae_salign64.i u{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret void +} + + +;--- ae_sat16x4.ll +declare <4 x i16> @llvm.xtensa.ae.sat16x4(<2 x i32>, <2 x i32>) +define <4 x i16> @test_xtensa_ae_sat16x4(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sat16x4: + + +%ret = call <4 x i16> @llvm.xtensa.ae.sat16x4(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_sat16x4 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sat24s.ll +declare <2 x i32> @llvm.xtensa.ae.sat24s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_sat24s(<2 
x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sat24s: + +%ret = call <2 x i32> @llvm.xtensa.ae.sat24s(<2 x i32> %ae_arth_v1) +; CHECK: ae_sat24s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sat48s.ll +declare <1 x i64> @llvm.xtensa.ae.sat48s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_sat48s(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sat48s: + +%ret = call <1 x i64> @llvm.xtensa.ae.sat48s(<1 x i64> %ae_arth_v1) +; CHECK: ae_sat48s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_satq56s.ll +declare <1 x i64> @llvm.xtensa.ae.satq56s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_satq56s(<1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_satq56s: + +%ret = call <1 x i64> @llvm.xtensa.ae.satq56s(<1 x i64> %ae_arth_v1) +; CHECK: ae_satq56s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sb.ll +declare ptr @llvm.xtensa.ae.sb(ptr, i32) +define ptr @test_xtensa_ae_sb(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sb: + + +%ret = call ptr @llvm.xtensa.ae.sb(ptr %ars, i32 %art) +; CHECK: ae_sb a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sb_ic.ll +declare ptr @llvm.xtensa.ae.sb.ic(ptr, i32) +define ptr @test_xtensa_ae_sb_ic(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sb_ic: + + +%ret = call ptr @llvm.xtensa.ae.sb.ic(ptr %ars, i32 %art) +; CHECK: ae_sb.ic a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sb_ip.ll +declare ptr @llvm.xtensa.ae.sb.ip(ptr, i32) +define ptr @test_xtensa_ae_sb_ip(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sb_ip: + + +%ret = call ptr @llvm.xtensa.ae.sb.ip(ptr %ars, i32 %art) +; CHECK: ae_sb.ip a{{[0-9]+}}, a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbf.ll +declare ptr @llvm.xtensa.ae.sbf(ptr) +define ptr @test_xtensa_ae_sbf(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sbf: + +%ret = call ptr @llvm.xtensa.ae.sbf(ptr %ars) +; CHECK: ae_sbf a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbf_ic.ll +declare ptr @llvm.xtensa.ae.sbf.ic(ptr) +define ptr @test_xtensa_ae_sbf_ic(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sbf_ic: + +%ret = call ptr @llvm.xtensa.ae.sbf.ic(ptr %ars) +; CHECK: ae_sbf.ic a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbf_ip.ll +declare ptr @llvm.xtensa.ae.sbf.ip(ptr) +define ptr @test_xtensa_ae_sbf_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_sbf_ip: + +%ret = call ptr @llvm.xtensa.ae.sbf.ip(ptr %ars) +; CHECK: ae_sbf.ip a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbi.ll +declare ptr @llvm.xtensa.ae.sbi(ptr, i32, i32) +define ptr @test_xtensa_ae_sbi(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sbi: + + + +%ret = call ptr @llvm.xtensa.ae.sbi(ptr %ars, i32 %art, i32 1) +; CHECK: ae_sbi a{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbi_ic.ll +declare ptr @llvm.xtensa.ae.sbi.ic(ptr, i32, i32) +define ptr @test_xtensa_ae_sbi_ic(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sbi_ic: + + + +%ret = call ptr @llvm.xtensa.ae.sbi.ic(ptr %ars, i32 %art, i32 1) +; CHECK: ae_sbi.ic a{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sbi_ip.ll +declare ptr @llvm.xtensa.ae.sbi.ip(ptr, i32, i32) +define ptr @test_xtensa_ae_sbi_ip(ptr %ars, i32 %art) { +; CHECK-LABEL: test_xtensa_ae_sbi_ip: + + + +%ret = call ptr @llvm.xtensa.ae.sbi.ip(ptr %ars, i32 %art, i32 1) +; CHECK: ae_sbi.ip a{{[0-9]+}}, a{{[0-9]+}}, {{-?[0-9]+}} + +ret ptr %ret +} + + +;--- ae_sel16i.ll +declare <4 x i16> @llvm.xtensa.ae.sel16i(<4 x i16>, <4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_sel16i(<4 x i16> %ae_dr_to_dr_v0, 
<4 x i16> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_sel16i: + + + +%ret = call <4 x i16> @llvm.xtensa.ae.sel16i(<4 x i16> %ae_dr_to_dr_v0, <4 x i16> %ae_dr_to_dr_v1, i32 0) +; CHECK: ae_sel16i aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sel16i_n.ll +declare <4 x i16> @llvm.xtensa.ae.sel16i.n(<4 x i16>, <4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_sel16i_n(<4 x i16> %ae_dr_to_dr_v0, <4 x i16> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_sel16i_n: + + + +%ret = call <4 x i16> @llvm.xtensa.ae.sel16i.n(<4 x i16> %ae_dr_to_dr_v0, <4 x i16> %ae_dr_to_dr_v1, i32 0) +; CHECK: ae_sel16i.n aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sext32.ll +declare <2 x i32> @llvm.xtensa.ae.sext32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_sext32(<2 x i32> %ae_dr_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_sext32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sext32(<2 x i32> %ae_dr_to_dr_v0, i32 7) +; CHECK: ae_sext32 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sext32x2d16_10.ll +declare <2 x i32> @llvm.xtensa.ae.sext32x2d16.10(<4 x i16>) +define <2 x i32> @test_xtensa_ae_sext32x2d16_10(<4 x i16> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_sext32x2d16_10: + +%ret = call <2 x i32> @llvm.xtensa.ae.sext32x2d16.10(<4 x i16> %ae_to_dr_v0) +; CHECK: ae_sext32x2d16.10 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sext32x2d16_32.ll +declare <2 x i32> @llvm.xtensa.ae.sext32x2d16.32(<4 x i16>) +define <2 x i32> @test_xtensa_ae_sext32x2d16_32(<4 x i16> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_sext32x2d16_32: + +%ret = call <2 x i32> @llvm.xtensa.ae.sext32x2d16.32(<4 x i16> %ae_to_dr_v0) +; CHECK: ae_sext32x2d16.32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sha32.ll +declare i32 @llvm.xtensa.ae.sha32(i32) +define i32 @test_xtensa_ae_sha32(i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sha32: + +%ret = call i32 @llvm.xtensa.ae.sha32(i32 %ars) +; CHECK: ae_sha32 a{{[0-9]+}}, a{{[0-9]+}} + +ret i32 %ret +} + + +;--- ae_shortswap.ll +declare <4 x i16> @llvm.xtensa.ae.shortswap(<4 x i16>) +define <4 x i16> @test_xtensa_ae_shortswap(<4 x i16> %ae_to_dr_v0) { +; CHECK-LABEL: test_xtensa_ae_shortswap: + +%ret = call <4 x i16> @llvm.xtensa.ae.shortswap(<4 x i16> %ae_to_dr_v0) +; CHECK: ae_shortswap aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_slaa16s.ll +declare <4 x i16> @llvm.xtensa.ae.slaa16s(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_slaa16s(<4 x i16> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaa16s: + + +%ret = call <4 x i16> @llvm.xtensa.ae.slaa16s(<4 x i16> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaa16s aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_slaa32.ll +declare <2 x i32> @llvm.xtensa.ae.slaa32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slaa32(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaa32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slaa32(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaa32 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slaa32s.ll +declare <2 x i32> @llvm.xtensa.ae.slaa32s(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slaa32s(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaa32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slaa32s(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaa32s aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + 
+ret <2 x i32> %ret +} + + +;--- ae_slaa64.ll +declare <1 x i64> @llvm.xtensa.ae.slaa64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slaa64(<1 x i64> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaa64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slaa64(<1 x i64> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaa64 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slaa64s.ll +declare <1 x i64> @llvm.xtensa.ae.slaa64s(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slaa64s(<1 x i64> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaa64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slaa64s(<1 x i64> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaa64s aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slaaq56.ll +declare <1 x i64> @llvm.xtensa.ae.slaaq56(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slaaq56(<1 x i64> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_slaaq56: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slaaq56(<1 x i64> %ae_shift_d0, i32 %ars) +; CHECK: ae_slaaq56 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slai16s.ll +declare <4 x i16> @llvm.xtensa.ae.slai16s(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_slai16s(<4 x i16> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai16s: + + +%ret = call <4 x i16> @llvm.xtensa.ae.slai16s(<4 x i16> %ae_shift_d0, i32 0) +; CHECK: ae_slai16s aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_slai24.ll +declare <2 x i32> @llvm.xtensa.ae.slai24(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slai24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai24: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slai24(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_slai24 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slai24s.ll +declare <2 x i32> @llvm.xtensa.ae.slai24s(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slai24s(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai24s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slai24s(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_slai24s aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slai32.ll +declare <2 x i32> @llvm.xtensa.ae.slai32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slai32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slai32(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_slai32 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slai32s.ll +declare <2 x i32> @llvm.xtensa.ae.slai32s(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_slai32s(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.slai32s(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_slai32s aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slai64.ll +declare <1 x i64> @llvm.xtensa.ae.slai64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slai64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slai64(<1 x i64> %ae_shift_d0, i32 0) +; CHECK: ae_slai64 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slai64s.ll +declare <1 x i64> @llvm.xtensa.ae.slai64s(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slai64s(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slai64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slai64s(<1 x i64> 
%ae_shift_d0, i32 0) +; CHECK: ae_slai64s aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slaisq56s.ll +declare <1 x i64> @llvm.xtensa.ae.slaisq56s(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_slaisq56s(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slaisq56s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.slaisq56s(<1 x i64> %ae_shift_d0, i32 0) +; CHECK: ae_slaisq56s aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slas24.ll +declare <2 x i32> @llvm.xtensa.ae.slas24(<2 x i32>) +define <2 x i32> @test_xtensa_ae_slas24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas24: + +%ret = call <2 x i32> @llvm.xtensa.ae.slas24(<2 x i32> %ae_shift_d0) +; CHECK: ae_slas24 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slas24s.ll +declare <2 x i32> @llvm.xtensa.ae.slas24s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_slas24s(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas24s: + +%ret = call <2 x i32> @llvm.xtensa.ae.slas24s(<2 x i32> %ae_shift_d0) +; CHECK: ae_slas24s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slas32.ll +declare <2 x i32> @llvm.xtensa.ae.slas32(<2 x i32>) +define <2 x i32> @test_xtensa_ae_slas32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas32: + +%ret = call <2 x i32> @llvm.xtensa.ae.slas32(<2 x i32> %ae_shift_d0) +; CHECK: ae_slas32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slas32s.ll +declare <2 x i32> @llvm.xtensa.ae.slas32s(<2 x i32>) +define <2 x i32> @test_xtensa_ae_slas32s(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas32s: + +%ret = call <2 x i32> @llvm.xtensa.ae.slas32s(<2 x i32> %ae_shift_d0) +; CHECK: ae_slas32s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_slas64.ll +declare <1 x i64> @llvm.xtensa.ae.slas64(<1 x i64>) +define <1 x i64> @test_xtensa_ae_slas64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas64: + +%ret = call <1 x i64> @llvm.xtensa.ae.slas64(<1 x i64> %ae_shift_d0) +; CHECK: ae_slas64 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slas64s.ll +declare <1 x i64> @llvm.xtensa.ae.slas64s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_slas64s(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slas64s: + +%ret = call <1 x i64> @llvm.xtensa.ae.slas64s(<1 x i64> %ae_shift_d0) +; CHECK: ae_slas64s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slasq56.ll +declare <1 x i64> @llvm.xtensa.ae.slasq56(<1 x i64>) +define <1 x i64> @test_xtensa_ae_slasq56(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slasq56: + +%ret = call <1 x i64> @llvm.xtensa.ae.slasq56(<1 x i64> %ae_shift_d0) +; CHECK: ae_slasq56 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_slassq56s.ll +declare <1 x i64> @llvm.xtensa.ae.slassq56s(<1 x i64>) +define <1 x i64> @test_xtensa_ae_slassq56s(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_slassq56s: + +%ret = call <1 x i64> @llvm.xtensa.ae.slassq56s(<1 x i64> %ae_shift_d0) +; CHECK: ae_slassq56s aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sra64_32.ll +declare <1 x i64> @llvm.xtensa.ae.sra64.32(<2 x i32>, i32) +define <1 x i64> @test_xtensa_ae_sra64_32(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sra64_32: + + +%ret = call <1 x i64> @llvm.xtensa.ae.sra64.32(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_sra64_32 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sraa16rs.ll 
+declare <4 x i16> @llvm.xtensa.ae.sraa16rs(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_sraa16rs(<4 x i16> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa16rs: + + +%ret = call <4 x i16> @llvm.xtensa.ae.sraa16rs(<4 x i16> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa16rs aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sraa16s.ll +declare <4 x i16> @llvm.xtensa.ae.sraa16s(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_sraa16s(<4 x i16> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa16s: + + +%ret = call <4 x i16> @llvm.xtensa.ae.sraa16s(<4 x i16> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa16s aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sraa32.ll +declare <2 x i32> @llvm.xtensa.ae.sraa32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_sraa32(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sraa32(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa32 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sraa32rs.ll +declare <2 x i32> @llvm.xtensa.ae.sraa32rs(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_sraa32rs(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa32rs: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sraa32rs(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa32rs aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sraa32s.ll +declare <2 x i32> @llvm.xtensa.ae.sraa32s(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_sraa32s(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sraa32s(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa32s aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sraa64.ll +declare <1 x i64> @llvm.xtensa.ae.sraa64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_sraa64(<1 x i64> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_sraa64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.sraa64(<1 x i64> %ae_shift_d0, i32 %ars) +; CHECK: ae_sraa64 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_srai16.ll +declare <4 x i16> @llvm.xtensa.ae.srai16(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_srai16(<4 x i16> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai16: + + +%ret = call <4 x i16> @llvm.xtensa.ae.srai16(<4 x i16> %ae_shift_d0, i32 0) +; CHECK: ae_srai16 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_srai16r.ll +declare <4 x i16> @llvm.xtensa.ae.srai16r(<4 x i16>, i32) +define <4 x i16> @test_xtensa_ae_srai16r(<4 x i16> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai16r: + + +%ret = call <4 x i16> @llvm.xtensa.ae.srai16r(<4 x i16> %ae_shift_d0, i32 0) +; CHECK: ae_srai16r aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_srai24.ll +declare <2 x i32> @llvm.xtensa.ae.srai24(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srai24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai24: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srai24(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_srai24 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srai32.ll +declare <2 x i32> @llvm.xtensa.ae.srai32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srai32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srai32(<2 x i32> %ae_shift_d0, 
i32 0) +; CHECK: ae_srai32 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srai32r.ll +declare <2 x i32> @llvm.xtensa.ae.srai32r(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srai32r(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai32r: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srai32r(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_srai32r aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srai64.ll +declare <1 x i64> @llvm.xtensa.ae.srai64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_srai64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srai64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.srai64(<1 x i64> %ae_shift_d0, i32 0) +; CHECK: ae_srai64 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sras24.ll +declare <2 x i32> @llvm.xtensa.ae.sras24(<2 x i32>) +define <2 x i32> @test_xtensa_ae_sras24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_sras24: + +%ret = call <2 x i32> @llvm.xtensa.ae.sras24(<2 x i32> %ae_shift_d0) +; CHECK: ae_sras24 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sras32.ll +declare <2 x i32> @llvm.xtensa.ae.sras32(<2 x i32>) +define <2 x i32> @test_xtensa_ae_sras32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_sras32: + +%ret = call <2 x i32> @llvm.xtensa.ae.sras32(<2 x i32> %ae_shift_d0) +; CHECK: ae_sras32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sras64.ll +declare <1 x i64> @llvm.xtensa.ae.sras64(<1 x i64>) +define <1 x i64> @test_xtensa_ae_sras64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_sras64: + +%ret = call <1 x i64> @llvm.xtensa.ae.sras64(<1 x i64> %ae_shift_d0) +; CHECK: ae_sras64 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_srla32.ll +declare <2 x i32> @llvm.xtensa.ae.srla32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srla32(<2 x i32> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_srla32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srla32(<2 x i32> %ae_shift_d0, i32 %ars) +; CHECK: ae_srla32 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srla64.ll +declare <1 x i64> @llvm.xtensa.ae.srla64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_srla64(<1 x i64> %ae_shift_d0, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_srla64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.srla64(<1 x i64> %ae_shift_d0, i32 %ars) +; CHECK: ae_srla64 aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_srli24.ll +declare <2 x i32> @llvm.xtensa.ae.srli24(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srli24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srli24: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srli24(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_srli24 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srli32.ll +declare <2 x i32> @llvm.xtensa.ae.srli32(<2 x i32>, i32) +define <2 x i32> @test_xtensa_ae_srli32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srli32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.srli32(<2 x i32> %ae_shift_d0, i32 0) +; CHECK: ae_srli32 aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srli64.ll +declare <1 x i64> @llvm.xtensa.ae.srli64(<1 x i64>, i32) +define <1 x i64> @test_xtensa_ae_srli64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srli64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.srli64(<1 x i64> %ae_shift_d0, i32 0) +; CHECK: ae_srli64 aed{{[0-9]+}}, 
aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_srls24.ll +declare <2 x i32> @llvm.xtensa.ae.srls24(<2 x i32>) +define <2 x i32> @test_xtensa_ae_srls24(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srls24: + +%ret = call <2 x i32> @llvm.xtensa.ae.srls24(<2 x i32> %ae_shift_d0) +; CHECK: ae_srls24 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srls32.ll +declare <2 x i32> @llvm.xtensa.ae.srls32(<2 x i32>) +define <2 x i32> @test_xtensa_ae_srls32(<2 x i32> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srls32: + +%ret = call <2 x i32> @llvm.xtensa.ae.srls32(<2 x i32> %ae_shift_d0) +; CHECK: ae_srls32 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_srls64.ll +declare <1 x i64> @llvm.xtensa.ae.srls64(<1 x i64>) +define <1 x i64> @test_xtensa_ae_srls64(<1 x i64> %ae_shift_d0) { +; CHECK-LABEL: test_xtensa_ae_srls64: + +%ret = call <1 x i64> @llvm.xtensa.ae.srls64(<1 x i64> %ae_shift_d0) +; CHECK: ae_srls64 aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sub16.ll +declare <4 x i16> @llvm.xtensa.ae.sub16(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_sub16(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub16: + + +%ret = call <4 x i16> @llvm.xtensa.ae.sub16(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) +; CHECK: ae_sub16 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sub16s.ll +declare <4 x i16> @llvm.xtensa.ae.sub16s(<4 x i16>, <4 x i16>) +define <4 x i16> @test_xtensa_ae_sub16s(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub16s: + + +%ret = call <4 x i16> @llvm.xtensa.ae.sub16s(<4 x i16> %ae_arth_v0, <4 x i16> %ae_arth_v1) +; CHECK: ae_sub16s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <4 x i16> %ret +} + + +;--- ae_sub24s.ll +declare <2 x i32> @llvm.xtensa.ae.sub24s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_sub24s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub24s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sub24s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_sub24s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sub32.ll +declare <2 x i32> @llvm.xtensa.ae.sub32(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_sub32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sub32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_sub32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sub32s.ll +declare <2 x i32> @llvm.xtensa.ae.sub32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_sub32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.sub32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_sub32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_sub64.ll +declare <1 x i64> @llvm.xtensa.ae.sub64(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_sub64(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub64: + + +%ret = call <1 x i64> @llvm.xtensa.ae.sub64(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_sub64 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_sub64s.ll +declare <1 x i64> @llvm.xtensa.ae.sub64s(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_sub64s(<1 x 
i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_sub64s: + + +%ret = call <1 x i64> @llvm.xtensa.ae.sub64s(<1 x i64> %ae_arth_v0, <1 x i64> %ae_arth_v1) +; CHECK: ae_sub64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_subadd32.ll +declare <2 x i32> @llvm.xtensa.ae.subadd32(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_subadd32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_subadd32: + + +%ret = call <2 x i32> @llvm.xtensa.ae.subadd32(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_subadd32 aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_subadd32s.ll +declare <2 x i32> @llvm.xtensa.ae.subadd32s(<2 x i32>, <2 x i32>) +define <2 x i32> @test_xtensa_ae_subadd32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) { +; CHECK-LABEL: test_xtensa_ae_subadd32s: + + +%ret = call <2 x i32> @llvm.xtensa.ae.subadd32s(<2 x i32> %ae_arth_v0, <2 x i32> %ae_arth_v1) +; CHECK: ae_subadd32s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_trunca32f64s_l.ll +declare <2 x i32> @llvm.xtensa.ae.trunca32f64s.l(<2 x i32>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_trunca32f64s_l(<2 x i32> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_trunca32f64s_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.trunca32f64s.l(<2 x i32> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 %ars) +; CHECK: ae_trunca32f64s.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_trunca32x2f64s.ll +declare <2 x i32> @llvm.xtensa.ae.trunca32x2f64s(<1 x i64>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_trunca32x2f64s(<1 x i64> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 %ars) { +; CHECK-LABEL: test_xtensa_ae_trunca32x2f64s: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.trunca32x2f64s(<1 x i64> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 %ars) +; CHECK: ae_trunca32x2f64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, a{{[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_trunci32f64s_l.ll +declare <2 x i32> @llvm.xtensa.ae.trunci32f64s.l(<2 x i32>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_trunci32f64s_l(<2 x i32> %ae_shift_d0, <1 x i64> %ae_shift_sd) { +; CHECK-LABEL: test_xtensa_ae_trunci32f64s_l: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.trunci32f64s.l(<2 x i32> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 0) +; CHECK: ae_trunci32f64s.l aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_trunci32x2f64s.ll +declare <2 x i32> @llvm.xtensa.ae.trunci32x2f64s(<1 x i64>, <1 x i64>, i32) +define <2 x i32> @test_xtensa_ae_trunci32x2f64s(<1 x i64> %ae_shift_d0, <1 x i64> %ae_shift_sd) { +; CHECK-LABEL: test_xtensa_ae_trunci32x2f64s: + + + +%ret = call <2 x i32> @llvm.xtensa.ae.trunci32x2f64s(<1 x i64> %ae_shift_d0, <1 x i64> %ae_shift_sd, i32 0) +; CHECK: ae_trunci32x2f64s aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}}, {{-?[0-9]+}} + +ret <2 x i32> %ret +} + + +;--- ae_vldl16c.ll +declare ptr @llvm.xtensa.ae.vldl16c(ptr) +define ptr @test_xtensa_ae_vldl16c(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vldl16c: + +%ret = call ptr @llvm.xtensa.ae.vldl16c(ptr %ars) +; CHECK: ae_vldl16c a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_vldl16c_ic.ll +declare ptr @llvm.xtensa.ae.vldl16c.ic(ptr) +define ptr @test_xtensa_ae_vldl16c_ic(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vldl16c_ic: + +%ret = call ptr @llvm.xtensa.ae.vldl16c.ic(ptr %ars) +; CHECK: ae_vldl16c.ic 
a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_vldl16c_ip.ll +declare ptr @llvm.xtensa.ae.vldl16c.ip(ptr) +define ptr @test_xtensa_ae_vldl16c_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vldl16c_ip: + +%ret = call ptr @llvm.xtensa.ae.vldl16c.ip(ptr %ars) +; CHECK: ae_vldl16c.ip a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_vldl16t.ll +declare { <1 x i1>, i32 } @llvm.xtensa.ae.vldl16t(ptr) +define <1 x i1> @test_xtensa_ae_vldl16t(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vldl16t: + +%ret = call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl16t(ptr %ars) +; CHECK: ae_vldl16t b{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i1>, i32 } %ret, 0 +ret <1 x i1> %ev +} + + +;--- ae_vldl32t.ll +declare { <1 x i1>, i32 } @llvm.xtensa.ae.vldl32t(ptr) +define <1 x i1> @test_xtensa_ae_vldl32t(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vldl32t: + +%ret = call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl32t(ptr %ars) +; CHECK: ae_vldl32t b{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i1>, i32 } %ret, 0 +ret <1 x i1> %ev +} + + +;--- ae_vldsht.ll +declare void @llvm.xtensa.ae.vldsht(i32) +define void @test_xtensa_ae_vldsht(i32 %art) { +; CHECK-LABEL: test_xtensa_ae_vldsht: + + call void @llvm.xtensa.ae.vldsht(i32 %art) +; CHECK: ae_vldsht a{{[0-9]+}} + +ret void +} + + +;--- ae_vlel16t.ll +declare { <1 x i1>, i32 } @llvm.xtensa.ae.vlel16t(i32, ptr) +define <1 x i1> @test_xtensa_ae_vlel16t(i32 %art, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vlel16t: + + +%ret = call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel16t(i32 %art, ptr %ars) +; CHECK: ae_vlel16t b{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i1>, i32 } %ret, 0 +ret <1 x i1> %ev +} + + +;--- ae_vlel32t.ll +declare { <1 x i1>, i32 } @llvm.xtensa.ae.vlel32t(i32, ptr) +define <1 x i1> @test_xtensa_ae_vlel32t(i32 %art, ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vlel32t: + + +%ret = call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel32t(i32 %art, ptr %ars) +; CHECK: ae_vlel32t b{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}} +%ev = extractvalue { <1 x i1>, i32 } %ret, 0 +ret <1 x i1> %ev +} + + +;--- ae_vles16c.ll +declare ptr @llvm.xtensa.ae.vles16c(ptr) +define ptr @test_xtensa_ae_vles16c(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vles16c: + +%ret = call ptr @llvm.xtensa.ae.vles16c(ptr %ars) +; CHECK: ae_vles16c a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_vles16c_ic.ll +declare ptr @llvm.xtensa.ae.vles16c.ic(ptr) +define ptr @test_xtensa_ae_vles16c_ic(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vles16c_ic: + +%ret = call ptr @llvm.xtensa.ae.vles16c.ic(ptr %ars) +; CHECK: ae_vles16c.ic a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_vles16c_ip.ll +declare ptr @llvm.xtensa.ae.vles16c.ip(ptr) +define ptr @test_xtensa_ae_vles16c_ip(ptr %ars) { +; CHECK-LABEL: test_xtensa_ae_vles16c_ip: + +%ret = call ptr @llvm.xtensa.ae.vles16c.ip(ptr %ars) +; CHECK: ae_vles16c.ip a{{[0-9]+}} + +ret ptr %ret +} + + +;--- ae_xor.ll +declare <1 x i64> @llvm.xtensa.ae.xor(<1 x i64>, <1 x i64>) +define <1 x i64> @test_xtensa_ae_xor(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) { +; CHECK-LABEL: test_xtensa_ae_xor: + + +%ret = call <1 x i64> @llvm.xtensa.ae.xor(<1 x i64> %ae_dr_to_dr_v0, <1 x i64> %ae_dr_to_dr_v1) +; CHECK: ae_xor aed{{[0-9]+}}, aed{{[0-9]+}}, aed{{[0-9]+}} + +ret <1 x i64> %ret +} + + +;--- ae_zalign64.ll +declare <8 x i8> @llvm.xtensa.ae.zalign64() +define <8 x i8> @test_xtensa_ae_zalign64() { +; CHECK-LABEL: test_xtensa_ae_zalign64: + +%ret = call <8 x i8> @llvm.xtensa.ae.zalign64() +; CHECK: ae_zalign64 u{{[0-9]+}} + +ret <8 x i8> %ret +} + + 
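+; Illustrative sketch, not taken from the patch: the RUR/WUR intrinsics
+; exercised below are paired read/write accessors for the AE user registers,
+; so state such as AE_SAR can be saved and restored around code that
+; clobbers it (signatures as declared in the tests that follow).
+declare i32 @llvm.xtensa.rur.ae.sar()
+declare void @llvm.xtensa.wur.ae.sar(i32)
+define void @save_restore_ae_sar_sketch() {
+  %saved = call i32 @llvm.xtensa.rur.ae.sar()   ; read AE_SAR into an i32
+  call void @llvm.xtensa.wur.ae.sar(i32 %saved) ; write the same value back
+  ret void
+}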
+;--- rur_ae_bithead.ll +declare i32 @llvm.xtensa.rur.ae.bithead() +define i32 @test_xtensa_rur_ae_bithead() { +; CHECK-LABEL: test_xtensa_rur_ae_bithead: + +%ret = call i32 @llvm.xtensa.rur.ae.bithead() +; CHECK: rur.ae_bithead a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_bitptr.ll +declare i32 @llvm.xtensa.rur.ae.bitptr() +define i32 @test_xtensa_rur_ae_bitptr() { +; CHECK-LABEL: test_xtensa_rur_ae_bitptr: + +%ret = call i32 @llvm.xtensa.rur.ae.bitptr() +; CHECK: rur.ae_bitptr a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_bitsused.ll +declare i32 @llvm.xtensa.rur.ae.bitsused() +define i32 @test_xtensa_rur_ae_bitsused() { +; CHECK-LABEL: test_xtensa_rur_ae_bitsused: + +%ret = call i32 @llvm.xtensa.rur.ae.bitsused() +; CHECK: rur.ae_bitsused a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_cbegin0.ll +declare i32 @llvm.xtensa.rur.ae.cbegin0() +define i32 @test_xtensa_rur_ae_cbegin0() { +; CHECK-LABEL: test_xtensa_rur_ae_cbegin0: + +%ret = call i32 @llvm.xtensa.rur.ae.cbegin0() +; CHECK: rur.ae_cbegin0 a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_cend0.ll +declare i32 @llvm.xtensa.rur.ae.cend0() +define i32 @test_xtensa_rur_ae_cend0() { +; CHECK-LABEL: test_xtensa_rur_ae_cend0: + +%ret = call i32 @llvm.xtensa.rur.ae.cend0() +; CHECK: rur.ae_cend0 a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_cw_sd_no.ll +declare i32 @llvm.xtensa.rur.ae.cw.sd.no() +define i32 @test_xtensa_rur_ae_cw_sd_no() { +; CHECK-LABEL: test_xtensa_rur_ae_cw_sd_no: + +%ret = call i32 @llvm.xtensa.rur.ae.cw.sd.no() +; CHECK: rur.ae_cw_sd_no a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_cwrap.ll +declare i32 @llvm.xtensa.rur.ae.cwrap() +define i32 @test_xtensa_rur_ae_cwrap() { +; CHECK-LABEL: test_xtensa_rur_ae_cwrap: + +%ret = call i32 @llvm.xtensa.rur.ae.cwrap() +; CHECK: rur.ae_cwrap a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_first_ts.ll +declare i32 @llvm.xtensa.rur.ae.first.ts() +define i32 @test_xtensa_rur_ae_first_ts() { +; CHECK-LABEL: test_xtensa_rur_ae_first_ts: + +%ret = call i32 @llvm.xtensa.rur.ae.first.ts() +; CHECK: rur.ae_first_ts a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_nextoffset.ll +declare i32 @llvm.xtensa.rur.ae.nextoffset() +define i32 @test_xtensa_rur_ae_nextoffset() { +; CHECK-LABEL: test_xtensa_rur_ae_nextoffset: + +%ret = call i32 @llvm.xtensa.rur.ae.nextoffset() +; CHECK: rur.ae_nextoffset a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_overflow.ll +declare i32 @llvm.xtensa.rur.ae.overflow() +define i32 @test_xtensa_rur_ae_overflow() { +; CHECK-LABEL: test_xtensa_rur_ae_overflow: + +%ret = call i32 @llvm.xtensa.rur.ae.overflow() +; CHECK: rur.ae_overflow a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_ovf_sar.ll +declare i32 @llvm.xtensa.rur.ae.ovf.sar() +define i32 @test_xtensa_rur_ae_ovf_sar() { +; CHECK-LABEL: test_xtensa_rur_ae_ovf_sar: + +%ret = call i32 @llvm.xtensa.rur.ae.ovf.sar() +; CHECK: rur.ae_ovf_sar a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_sar.ll +declare i32 @llvm.xtensa.rur.ae.sar() +define i32 @test_xtensa_rur_ae_sar() { +; CHECK-LABEL: test_xtensa_rur_ae_sar: + +%ret = call i32 @llvm.xtensa.rur.ae.sar() +; CHECK: rur.ae_sar a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_searchdone.ll +declare i32 @llvm.xtensa.rur.ae.searchdone() +define i32 @test_xtensa_rur_ae_searchdone() { +; CHECK-LABEL: test_xtensa_rur_ae_searchdone: + +%ret = call i32 @llvm.xtensa.rur.ae.searchdone() +; CHECK: rur.ae_searchdone a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_tablesize.ll +declare i32 @llvm.xtensa.rur.ae.tablesize() +define i32 @test_xtensa_rur_ae_tablesize() { 
+; CHECK-LABEL: test_xtensa_rur_ae_tablesize: + +%ret = call i32 @llvm.xtensa.rur.ae.tablesize() +; CHECK: rur.ae_tablesize a{{[0-9]+}} + +ret i32 %ret +} + + +;--- rur_ae_ts_fts_bu_bp.ll +declare i32 @llvm.xtensa.rur.ae.ts.fts.bu.bp() +define i32 @test_xtensa_rur_ae_ts_fts_bu_bp() { +; CHECK-LABEL: test_xtensa_rur_ae_ts_fts_bu_bp: + +%ret = call i32 @llvm.xtensa.rur.ae.ts.fts.bu.bp() +; CHECK: rur.ae_ts_fts_bu_bp a{{[0-9]+}} + +ret i32 %ret +} + + +;--- wur_ae_bithead.ll +declare void @llvm.xtensa.wur.ae.bithead(i32) +define void @test_xtensa_wur_ae_bithead(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_bithead: + + call void @llvm.xtensa.wur.ae.bithead(i32 %art) +; CHECK: wur.ae_bithead a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_bitptr.ll +declare void @llvm.xtensa.wur.ae.bitptr(i32) +define void @test_xtensa_wur_ae_bitptr(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_bitptr: + + call void @llvm.xtensa.wur.ae.bitptr(i32 %art) +; CHECK: wur.ae_bitptr a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_bitsused.ll +declare void @llvm.xtensa.wur.ae.bitsused(i32) +define void @test_xtensa_wur_ae_bitsused(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_bitsused: + + call void @llvm.xtensa.wur.ae.bitsused(i32 %art) +; CHECK: wur.ae_bitsused a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_cbegin0.ll +declare void @llvm.xtensa.wur.ae.cbegin0(i32) +define void @test_xtensa_wur_ae_cbegin0(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_cbegin0: + + call void @llvm.xtensa.wur.ae.cbegin0(i32 %art) +; CHECK: wur.ae_cbegin0 a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_cend0.ll +declare void @llvm.xtensa.wur.ae.cend0(i32) +define void @test_xtensa_wur_ae_cend0(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_cend0: + + call void @llvm.xtensa.wur.ae.cend0(i32 %art) +; CHECK: wur.ae_cend0 a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_cw_sd_no.ll +declare void @llvm.xtensa.wur.ae.cw.sd.no(i32) +define void @test_xtensa_wur_ae_cw_sd_no(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_cw_sd_no: + + call void @llvm.xtensa.wur.ae.cw.sd.no(i32 %art) +; CHECK: wur.ae_cw_sd_no a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_cwrap.ll +declare void @llvm.xtensa.wur.ae.cwrap(i32) +define void @test_xtensa_wur_ae_cwrap(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_cwrap: + + call void @llvm.xtensa.wur.ae.cwrap(i32 %art) +; CHECK: wur.ae_cwrap a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_first_ts.ll +declare void @llvm.xtensa.wur.ae.first.ts(i32) +define void @test_xtensa_wur_ae_first_ts(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_first_ts: + + call void @llvm.xtensa.wur.ae.first.ts(i32 %art) +; CHECK: wur.ae_first_ts a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_nextoffset.ll +declare void @llvm.xtensa.wur.ae.nextoffset(i32) +define void @test_xtensa_wur_ae_nextoffset(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_nextoffset: + + call void @llvm.xtensa.wur.ae.nextoffset(i32 %art) +; CHECK: wur.ae_nextoffset a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_overflow.ll +declare void @llvm.xtensa.wur.ae.overflow(i32) +define void @test_xtensa_wur_ae_overflow(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_overflow: + + call void @llvm.xtensa.wur.ae.overflow(i32 %art) +; CHECK: wur.ae_overflow a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_ovf_sar.ll +declare void @llvm.xtensa.wur.ae.ovf.sar(i32) +define void @test_xtensa_wur_ae_ovf_sar(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_ovf_sar: + + call void @llvm.xtensa.wur.ae.ovf.sar(i32 %art) +; CHECK: wur.ae_ovf_sar a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_sar.ll +declare void 
@llvm.xtensa.wur.ae.sar(i32) +define void @test_xtensa_wur_ae_sar(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_sar: + + call void @llvm.xtensa.wur.ae.sar(i32 %art) +; CHECK: wur.ae_sar a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_searchdone.ll +declare void @llvm.xtensa.wur.ae.searchdone(i32) +define void @test_xtensa_wur_ae_searchdone(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_searchdone: + + call void @llvm.xtensa.wur.ae.searchdone(i32 %art) +; CHECK: wur.ae_searchdone a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_tablesize.ll +declare void @llvm.xtensa.wur.ae.tablesize(i32) +define void @test_xtensa_wur_ae_tablesize(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_tablesize: + + call void @llvm.xtensa.wur.ae.tablesize(i32 %art) +; CHECK: wur.ae_tablesize a{{[0-9]+}} + +ret void +} + + +;--- wur_ae_ts_fts_bu_bp.ll +declare void @llvm.xtensa.wur.ae.ts.fts.bu.bp(i32) +define void @test_xtensa_wur_ae_ts_fts_bu_bp(i32 %art) { +; CHECK-LABEL: test_xtensa_wur_ae_ts_fts_bu_bp: + + call void @llvm.xtensa.wur.ae.ts.fts.bu.bp(i32 %art) +; CHECK: wur.ae_ts_fts_bu_bp a{{[0-9]+}} + +ret void +} + + diff --git a/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll b/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll new file mode 100644 index 0000000000000..1e4df95919905 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll @@ -0,0 +1,329 @@ +; RUN: llc -mtriple=xtensa -mcpu=cnl %s -o - | FileCheck %s + +define i32 @test_2xi32toi32(<2 x i32> %a) { + ; CHECK-LABEL: test_2xi32toi32 + ; CHECK: ae_movad32.l a2, aed0 + %r = extractelement <2 x i32> %a, i32 0 + ret i32 %r +} + +define <2 x i32> @test_i32to2xi32(i32 %a) { + ; CHECK-LABEL: test_i32to2xi32 + ; CHECK: ae_movda32x2 aed0, a2, a2 + %vecinit = insertelement <2 x i32> undef, i32 %a, i64 0 + %vecinit1 = shufflevector <2 x i32> %vecinit, <2 x i32> poison, <2 x i32> zeroinitializer + ret <2 x i32> %vecinit1 +} + +define void @test_store_2xi32(i32 %a, <2 x i32> %v) { + ; CHECK-LABEL: test_store_2xi32 + ; CHECK: ae_s32x2.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + store <2 x i32> %v, ptr %p, align 8 + ret void +} + +define void @test_store_1xi64(i32 %a, <1 x i64> %v) { + ; CHECK-LABEL: test_store_1xi64 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_s64.i aed0, a2, 0 + store <1 x i64> %v, ptr %p, align 8 + ret void +} + +define <1 x i64> @test_build_1xi64(i64 %v) { + ; CHECK-LABEL: test_build_1xi64 + ; CHECK: ae_movda32x2 aed0, a3, a2 + %vec = insertelement <1 x i64> undef, i64 %v, i64 0 + ret <1 x i64> %vec +} + +define void @test_store_4xi16(i32 %a, <4 x i16> %v) { + ; CHECK-LABEL: test_store_4xi16 + ; CHECK: ae_s16x4.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + store <4 x i16> %v, ptr %p, align 8 + ret void +} + +define <2 x i32> @test_load_2xi32(i32 %a) { + ; CHECK-LABEL: test_load_2xi32 + ; CHECK: ae_l32x2.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <2 x i32>, ptr %p, align 8 + ret <2 x i32> %v +} + +define <1 x i64> @test_load_1xi64(i32 %a) { + ; CHECK-LABEL: test_load_1xi64 + ; CHECK: ae_l64.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <1 x i64>, ptr %p, align 8 + ret <1 x i64> %v +} + +define <4 x i16> @test_load_4xi16(i32 %a) { + ; CHECK-LABEL: test_load_4xi16 + ; CHECK: ae_l16x4.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <4 x i16>, ptr %p, align 8 + ret <4 x i16> %v +} + +define void @test_build_store_1xi32(i32 %a, i32 %v) { + ; CHECK-LABEL: test_build_store_1xi32 + ; CHECK: ae_movda32 [[A:aed[0-9]+]], a3 + %vec = insertelement <1 x i32> undef, i32 %v, i64 0 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_s32.l.i 
[[A]], a2, 0 + store <1 x i32> %vec, ptr %p, align 8 + ret void +} + +define i32 @test_load_extract_1xi32(i32 %a) { + ; CHECK-LABEL: test_load_extract_1xi32 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_l32.i [[A:aed[0-9]+]], a2, 0 + %vec = load <1 x i32>, ptr %p, align 8 + ; CHECK: ae_movad32.l a2, [[A]] + %r = extractelement <1 x i32> %vec, i32 0 + ret i32 %r +} + +define <4 x i16> @test_build_4xi16_2(i16 %a, i16 %b) { + ; CHECK-LABEL: test_build_4xi16_2 + ; CHECK: ae_movda16x2 aed0, a2, a3 + %vecinit = insertelement <4 x i16> undef, i16 %a, i64 0 + %vecinit1 = insertelement <4 x i16> %vecinit, i16 %b, i64 1 + %vecinit2 = insertelement <4 x i16> %vecinit1, i16 %a, i64 2 + %vecinit3 = insertelement <4 x i16> %vecinit2, i16 %b, i64 3 + ret <4 x i16> %vecinit3 +} + +define <4 x i16> @test_build_4xi16_1(i16 %a) { + ; CHECK-LABEL: test_build_4xi16_1 + ; CHECK: ae_movda16 aed0, a2 + %vecinit = insertelement <4 x i16> undef, i16 %a, i64 0 + %vecinit1 = shufflevector <4 x i16> %vecinit, <4 x i16> poison, <4 x i32> zeroinitializer + ret <4 x i16> %vecinit1 +} + +define i32 @test_extract(<2 x i32> %v2i, <1 x i32> %v1i, <4 x i16> %v4s, <1 x i64> %v1l) { + ; CHECK-LABEL: test_extract + ; CHECK-DAG: ae_movad32.h {{a[0-9]+}}, aed0 + %v2i0 = extractelement <2 x i32> %v2i, i64 0 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed0 + %v2i1 = extractelement <2 x i32> %v2i, i64 1 + %sum1 = add i32 %v2i0, %v2i1 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed1 + %v1i0 = extractelement <1 x i32> %v1i, i64 0 + %sum2 = add i32 %sum1, %v1i0 + ; CHECK-DAG: ae_movad16.0 {{a[0-9]+}}, aed2 + %v4s0 = extractelement <4 x i16> %v4s, i64 0 + %v4s0i = zext i16 %v4s0 to i32 + %sum3 = add i32 %v4s0i, %sum2 + ; CHECK-DAG: ae_movad16.1 {{a[0-9]+}}, aed2 + %v4s1 = extractelement <4 x i16> %v4s, i64 1 + %v4s1i = zext i16 %v4s1 to i32 + %sum4 = add i32 %v4s1i, %sum3 + ; CHECK-DAG: ae_movad16.2 {{a[0-9]+}}, aed2 + %v4s2 = extractelement <4 x i16> %v4s, i64 2 + %v4s2i = zext i16 %v4s2 to i32 + %sum5 = add i32 %v4s2i, %sum4 + ; CHECK-DAG: ae_movad16.3 {{a[0-9]+}}, aed2 + %v4s3 = extractelement <4 x i16> %v4s, i64 3 + %v4s3i = zext i16 %v4s3 to i32 + %sum6 = add i32 %v4s3i, %sum5 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed3 + %v1l0 = extractelement <1 x i64> %v1l, i64 0 + %v1l0l = trunc i64 %v1l0 to i32 + %sum7 = add i32 %v1l0l, %sum6 + + ret i32 %sum7 +} + +define <1 x i32> @test_extract_subvec_1x32(<2 x i32> %v) { + ; CHECK-LABEL: test_extract_subvec_1x32 + ; CHECK: ae_movad32.l {{a[0-9]+}}, aed0 + ; CHECK: ae_movda32 aed0, {{a[0-9]+}} + %shuffle = shufflevector <2 x i32> %v, <2 x i32> poison, <1 x i32> zeroinitializer + ret <1 x i32> %shuffle +} + + +define <4 x i16> @rlshift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: rlshift4: + ; CHECK: ssr {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shr = lshr <4 x i16> %a, %sh_prom + ret <4 x i16> %shr +} + + +define <4 x i16> @rlshift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: rlshift4_imm: + ; CHECK: srli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = lshr <4 x i16> %a, + ret <4 x i16> %shr +} + + +define <2 x i32> @rlshift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: rlshift2: + ; CHECK: ssr {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer + %shr = lshr <2 x i32> %a, %splat.splat + ret <2 x i32> %shr +} + + +define <2 x i32> @rlshift2_imm(<2 x i32> %a) { + ; 
CHECK-LABEL: rlshift2_imm: + ; CHECK: srli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = lshr <2 x i32> %a, + ret <2 x i32> %shr +} + +define <1 x i64> @rlshift1(<1 x i64> %a, i32 %b) { + ; CHECK-LABEL: rlshift1: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: src {{a[0-9]+}} + ; CHECK: srl {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i32> poison, i32 %b, i64 0 + %sh_prom = zext <1 x i32> %splat.splatinsert to <1 x i64> + %shr = lshr <1 x i64> %a, %sh_prom + ret <1 x i64> %shr +} + +define <1 x i64> @rlshift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: rlshift1_imm: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: src {{a[0-9]+}} + ; CHECK: srl {{a[0-9]+}} + %shr = lshr <1 x i64> %a, + ret <1 x i64> %shr +} + +define <4 x i16> @rashift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: rashift4: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shr = ashr <4 x i16> %a, %sh_prom + ret <4 x i16> %shr +} + +define <4 x i16> @rashift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: rashift4_imm: + ; CHECK: srai {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = ashr <4 x i16> %a, + ret <4 x i16> %shr +} + +define <2 x i32> @rashift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: rashift2: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer + %shr = ashr <2 x i32> %a, %splat.splat + ret <2 x i32> %shr +} + + +define <2 x i32> @rashift2_imm(<2 x i32> %a) { + ; CHECK-LABEL: rashift2_imm: + ; CHECK: srai {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = ashr <2 x i32> %a, + ret <2 x i32> %shr +} + + +define <1 x i64> @rashift1(<1 x i64> %a, i64 %b) { + ; CHECK-LABEL: rashift1: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i64> poison, i64 %b, i64 0 + %shr = ashr <1 x i64> %a, %splat.splatinsert + ret <1 x i64> %shr +} + + +define <1 x i64> @rashift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: rashift1_imm: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %shr = ashr <1 x i64> %a, + ret <1 x i64> %shr +} + + +define <4 x i16> @lshift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: lshift4: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shl = shl <4 x i16> %a, %sh_prom + ret <4 x i16> %shl +} + + +define <4 x i16> @lshift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: lshift4_imm: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shl = shl <4 x i16> %a, + ret <4 x i16> %shl +} + + +define <2 x i32> @lshift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: lshift2: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer + %shl = shl <2 x i32> %a, %splat.splat + ret <2 x i32> %shl +} + + +define <2 x i32> @lshift2_imm(<2 x i32> %a) { + ; CHECK-LABEL: lshift2_imm: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shl = shl <2 x i32> %a, + ret <2 x i32> %shl +} + + +define <1 x i64> @lshift1(<1 x i64> %a, i64 %b) { + ; CHECK-LABEL: lshift1: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i64> poison, i64 %b, i64 0 + %shl = shl <1 x i64> %a, %splat.splatinsert + ret <1 x 
i64> %shl +} + + +define <1 x i64> @lshift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: lshift1_imm: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %shl = shl <1 x i64> %a, + ret <1 x i64> %shl +} + +define void @test_valign_load_store(i32 %p1, i32 %p2) { + ; CHECK-LABEL: test_valign_load_store: + %ptr1 = inttoptr i32 %p1 to ptr + %ptr2 = inttoptr i32 %p2 to ptr + ; CHECK: ae_lalign64.i [[V:u[0-3]]], {{a[0-9]+}}, 0 + %v = load <8 x i8>, ptr %ptr1, align 8 + ; CHECK: ae_salign64.i [[V]], {{a[0-9]+}}, 0 + store <8 x i8> %v, ptr %ptr2, align 8 + ret void +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll new file mode 100644 index 0000000000000..17eb2b5fefee5 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll @@ -0,0 +1,37 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define <1 x i1> @test_xtbool_trunc(i32 %a) { + ; CHECK-LABEL: test_xtbool_trunc + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: or [[OR:a[0-9]+]], [[AND]], a2 + ; CHECK: wsr [[OR]], br + %trunc = trunc i32 %a to i1 + %vec = insertelement <1 x i1> poison, i1 %trunc, i64 0 + ret <1 x i1> %vec +} + +define i32 @test_xtbool_zext(<1 x i1> %b) { + ; CHECK-LABEL: test_xtbool_zext + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui {{a[0-9]+}}, [[BREG]], 0, 1 + %bit = extractelement <1 x i1> %b, i64 0 + %int = zext i1 %bit to i32 + ret i32 %int +} + + +define <2 x i1> @test_xtbool2_build(i32 %a, i32 %b) { + ; CHECK-LABEL: test_xtbool2_build: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + ; CHECK: or {{a[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} + ; CHECK: wsr [[OR]], br + %tobool = icmp ne i32 %a, 0 + %vecinit = insertelement <2 x i1> undef, i1 %tobool, i64 0 + %tobool1 = icmp ne i32 %b, 0 + %vecinit2 = insertelement <2 x i1> %vecinit, i1 %tobool1, i64 1 + ret <2 x i1> %vecinit2 +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll new file mode 100644 index 0000000000000..cdb36c4667465 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll @@ -0,0 +1,74 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define float @test_xtbool_movt(float %a, float %b, <1 x i1> %c) { + ; CHECK-LABEL: test_xtbool_movt + ; CHECK: movt.s {{f[0-9]+}}, {{f[0-9]+}}, b0 + %f = call float @llvm.xtensa.xt.movt.s(float %a, float %b, <1 x i1> %c) + ret float %f +} + +define float @test_xtbool_movf(float %a, float %b, <1 x i1> %c) { + ; CHECK-LABEL: test_xtbool_movf + ; CHECK: movf.s {{f[0-9]+}}, {{f[0-9]+}}, b0 + %f = call float @llvm.xtensa.xt.movf.s(float %a, float %b, <1 x i1> %c) + ret float %f +} + +define <1 x i1> @test_xtbool_oeq_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_oeq_s + ; CHECK: oeq.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.oeq.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ueq_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ueq_s + ; CHECK: ueq.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ueq.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ole_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ole_s + ; CHECK: ole.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ole.s(float %a, float %b) + ret <1 x i1> %r 
+} + +define <1 x i1> @test_xtbool_ule_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ule_s + ; CHECK: ule.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ule.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_olt_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_olt_s + ; CHECK: olt.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.olt.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ult_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ult_s + ; CHECK: ult.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ult.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_un_s(float %a, float %b) { + ; CHECK-LABEL: test_un_s + ; CHECK: un.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.un.s(float %a, float %b) + ret <1 x i1> %r +} + +declare float @llvm.xtensa.xt.movt.s(float, float, <1 x i1>); +declare float @llvm.xtensa.xt.movf.s(float, float, <1 x i1>); +declare <1 x i1> @llvm.xtensa.xt.oeq.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ueq.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ole.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ule.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.olt.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ult.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.un.s(float %a, float %b); \ No newline at end of file diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll new file mode 100644 index 0000000000000..b3f4b0e3dfa6a --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll @@ -0,0 +1,10 @@ +; RUN: llc -O0 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define <1 x i1> @test_select(<1 x i1> %cc, <1 x i1> %t, <1 x i1> %f) { + ; CHECK-LABEL: test_select + ; CHECK-DAG: andb [[EQ:b[0-9]+]], {{b[0-9]+}}, {{b[0-9]+}} + ; CHECK-DAG: andbc [[NE:b[0-9]+]], {{b[0-9]+}}, {{b[0-9]+}} + ; CHECK: orb {{b[0-9]+}}, [[EQ]], [[NE]] + %r = select <1 x i1> %cc, <1 x i1> %t, <1 x i1> %f + ret <1 x i1> %r +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll new file mode 100644 index 0000000000000..3381657d294b9 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll @@ -0,0 +1,67 @@ +; RUN: llc -O0 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define <1 x i1> @test_spill(<1 x i1> %b0, <1 x i1> %b1) +{ + ; CHECK-LABEL: test_spill + ; CHECK: rsr {{a[0-9]+}}, br + ; CHECK: s8i {{a[0-9]+}} + ; CHECK: callx8 a8 + %b2 = call <1 x i1> @get_xtbool() + + ; CHECK: l8ui {{a[0-9]+}} + ; CHECK: rsr {{a[0-9]+}}, br + ; CHECK: wsr {{a[0-9]+}}, br + ; CHECK: orb {{b[0-9]+}} + %r0 = or <1 x i1> %b0, %b1 + ret <1 x i1> %r0 +} + +declare <1 x i1> @get_xtbool() + +define <1 x i1> @test_xtbool_load(i32 %addr) { + ; CHECK-LABEL: test_xtbool_load + ; CHECK: l8ui {{a[0-9]+}} + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, [[BREG]] + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} + ; CHECK: wsr [[OR]], br + %ptr = inttoptr i32 %addr to ptr + %load_bits = load <8 x i1>, ptr %ptr, align 1 + %extractvec = shufflevector <8 x i1> %load_bits, <8 x i1> poison, <1 x i32> zeroinitializer + ret <1 x i1> %extractvec +} + +define void @test_xtbool_store(i32 %addr, <1 x i1> %b) { + ; CHECK-LABEL: test_xtbool_store: + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui [[DST:a[0-9]+]], [[BREG]], 0, 1 + ; CHECK: s8i [[DST]], {{a[0-9]+}}, {{[0-9]+}} + %ptr = 
inttoptr i32 %addr to ptr + %insertvec = shufflevector <1 x i1> %b, <1 x i1> poison, <8 x i32> + store <8 x i1> %insertvec, ptr %ptr, align 1 + ret void +} +define <2 x i1> @test_xtbool2_load(i32 %addr) { + ; CHECK-LABEL: test_xtbool2_load: + ; CHECK: l8ui {{a[0-9]+}} + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, [[BREG]] + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} + ; CHECK: wsr [[OR]], br + %ptr = inttoptr i32 %addr to ptr + %load_bits = load <8 x i1>, ptr %ptr, align 1 + %extractvec = shufflevector <8 x i1> %load_bits, <8 x i1> poison,<2 x i32> + ret <2 x i1> %extractvec +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn writeonly +define void @test_xtbool2_store(i32 %p, <2 x i1> %v) { + ; CHECK-LABEL: test_xtbool2_store: + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui [[DST:a[0-9]+]], [[BREG]], 0, 2 + ; CHECK: s8i [[DST]], {{a[0-9]+}}, {{[0-9]+}} + %ptr = inttoptr i32 %p to ptr + %insertvec = shufflevector <2 x i1> %v, <2 x i1> poison, <8 x i32> + store <8 x i1> %insertvec, ptr %ptr, align 1 + ret void +}
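The xtbool tests above exercise each comparison and conditional move in isolation; as a rough illustration of how they compose, the following IR sketch (not taken from the patch) builds a floating-point minimum out of llvm.xtensa.xt.olt.s and llvm.xtensa.xt.movt.s, assuming movt.s yields its second float operand when the <1 x i1> predicate is set and its first operand otherwise.

declare <1 x i1> @llvm.xtensa.xt.olt.s(float, float)
declare float @llvm.xtensa.xt.movt.s(float, float, <1 x i1>)

define float @fmin_sketch(float %a, float %b) {
  ; predicate is set when %b is ordered-less-than %a
  %lt = call <1 x i1> @llvm.xtensa.xt.olt.s(float %b, float %a)
  ; assumed semantics: result = %lt ? %b : %a
  %min = call float @llvm.xtensa.xt.movt.s(float %a, float %b, <1 x i1> %lt)
  ret float %min
}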