vint8m1_t __riscv_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredand_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2,
vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2,
vint8m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2,
vint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2,
vuint8m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
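// Usage sketch (illustrative only, assuming <riscv_vector.h> is included and
// the vsetvl / vle32 / vmv scalar-move intrinsics from the wider intrinsic
// set): a strip-mined, tail-undisturbed (_tu) sum of an int32_t array built
// on the __riscv_vredsum_vs_i32m8_i32m1_tu prototype above. The helper name
// sum_i32 is hypothetical, not part of this API.
static inline int32_t sum_i32(const int32_t *src, size_t n) {
  vint32m1_t acc = __riscv_vmv_s_x_i32m1(0, 1);          // acc[0] = 0
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - i);             // elements in this strip
    vint32m8_t v = __riscv_vle32_v_i32m8(src + i, vl);   // load one strip
    // vd = acc: tail elements of acc stay undisturbed; vs1 = acc carries the
    // running sum, so acc[0] accumulates across strips.
    acc = __riscv_vredsum_vs_i32m8_i32m1_tu(acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vmv_x_s_i32m1_i32(acc);                 // result lives in element 0
}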
// masked functions
vint8m1_t __riscv_vredsum_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredsum_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmax_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmin_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredand_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredand_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredand_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredand_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredand_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredand_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredand_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredand_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredand_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredand_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredand_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd,
vint8mf8_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd,
vint8mf2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd,
vint8m1_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd,
vint8m2_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd,
vint8m4_t vs2, vint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd,
vint8m8_t vs2, vint8m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint16mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint16mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint16m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint16m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint16m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredxor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint32mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint32m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint32m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint32m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint64m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint64m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint64m8_t vs2, vint64m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2, vuint32m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2, vuint64m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2,
vuint64m1_t vs1, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredand_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2, vuint32m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2, vuint64m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2, vuint32m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2, vuint64m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd,
vuint8mf8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd,
vuint8mf4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd,
vuint8mf2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd,
vuint8m2_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd,
vuint8m4_t vs2, vuint8m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd,
vuint8m8_t vs2, vuint8m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint16mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint16mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint16m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint16m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint16m8_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint32mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint32m2_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint32m4_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint32m8_t vs2, vuint32m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint64m2_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint64m4_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint64m8_t vs2, vuint64m1_t vs1,
size_t vl);
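
// Example (illustrative sketch, not part of the listing): AND-reduce only the
// "valid" words of a uint64_t array using the masked, tail-undisturbed (_tum)
// form of vredand shown above. Assumes <riscv_vector.h> and <stdint.h>; the
// valid[] flag array is a hypothetical input, and the load/compare/vmv helper
// intrinsics come from elsewhere in this API.
static inline uint64_t and_of_valid_words(const uint64_t *w,
                                          const uint8_t *valid, size_t n) {
  // Element 0 of the accumulator carries the running result; start at the
  // AND identity (all ones).
  vuint64m1_t acc = __riscv_vmv_s_x_u64m1(UINT64_MAX, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e64m2(n - i);
    vuint64m2_t v = __riscv_vle64_v_u64m2(&w[i], vl);
    // The mask register type for a u64m2 source is vbool64m2's ratio,
    // SEW/LMUL = 32, i.e. vbool32_t; a u8mf4 load shares that ratio.
    vuint8mf4_t f = __riscv_vle8_v_u8mf4(&valid[i], vl);
    vbool32_t m = __riscv_vmsne_vx_u8mf4_b32(f, 0, vl);
    // vd = acc keeps the tail, vs1[0] = acc[0] folds previous iterations in;
    // if no element is active, vs1[0] is carried through unchanged.
    acc = __riscv_vredand_vs_u64m2_u64m1_tum(m, acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vmv_x_s_u64m1_u64(acc);
}
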
vint16m1_t __riscv_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t vd, vint8mf8_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t vd, vint8mf4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t vd, vint8m1_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t vd, vint8m2_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t vd, vint8m4_t vs2,
vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t vd, vint8m8_t vs2,
vint16m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t vd, vint16mf4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t vd, vint16m1_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t vd, vint16m2_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t vd, vint16m4_t vs2,
vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t vd, vint16m8_t vs2,
vint32m1_t vs1, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t vd, vint32m1_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t vd, vint32m2_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t vd, vint32m4_t vs2,
vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t vd, vint32m8_t vs2,
vint64m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t vd, vuint8mf8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t vd, vuint8mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t vd, vuint8m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t vd, vuint8m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t vd, vuint8m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t vd, vuint8m8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t vd,
vuint16mf4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t vd,
vuint16mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t vd, vuint16m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t vd, vuint16m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t vd, vuint16m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t vd, vuint16m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t vd,
vuint32mf2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t vd, vuint32m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t vd, vuint32m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t vd, vuint32m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t vd, vuint32m8_t vs2,
vuint64m1_t vs1, size_t vl);
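
// Example (illustrative sketch, not part of the listing): sum an int16_t array
// into a 32-bit accumulator with the widening reduction vwredsum (_tu form)
// listed above. Assumes <riscv_vector.h>; the vsetvl/load/vmv helpers come
// from elsewhere in this API.
static inline int32_t sum_i16_to_i32(const int16_t *x, size_t n) {
  vint32m1_t acc = __riscv_vmv_s_x_i32m1(0, 1); // running sum lives in element 0
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m4(n - i);
    vint16m4_t v = __riscv_vle16_v_i16m4(&x[i], vl);
    // Each element of vs2 is sign-extended to 32 bits before being added to
    // vs1[0]; _tu leaves elements 1..VLMAX-1 of the destination untouched.
    acc = __riscv_vwredsum_vs_i16m4_i32m1_tu(acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vmv_x_s_i32m1_i32(acc);
}
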
// masked functions
vint16m1_t __riscv_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t vm, vint16m1_t vd,
vint8mf8_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t vm, vint16m1_t vd,
vint8mf4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t vm, vint16m1_t vd,
vint8mf2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m1_i16m1_tum(vbool8_t vm, vint16m1_t vd,
vint8m1_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m2_i16m1_tum(vbool4_t vm, vint16m1_t vd,
vint8m2_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m4_i16m1_tum(vbool2_t vm, vint16m1_t vd,
vint8m4_t vs2, vint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m8_i16m1_tum(vbool1_t vm, vint16m1_t vd,
vint8m8_t vs2, vint16m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t vm, vint32m1_t vd,
vint16mf4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t vm, vint32m1_t vd,
vint16mf2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m1_i32m1_tum(vbool16_t vm, vint32m1_t vd,
vint16m1_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m2_i32m1_tum(vbool8_t vm, vint32m1_t vd,
vint16m2_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m4_i32m1_tum(vbool4_t vm, vint32m1_t vd,
vint16m4_t vs2, vint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m8_i32m1_tum(vbool2_t vm, vint32m1_t vd,
vint16m8_t vs2, vint32m1_t vs1,
size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t vm, vint64m1_t vd,
vint32mf2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m1_i64m1_tum(vbool32_t vm, vint64m1_t vd,
vint32m1_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m2_i64m1_tum(vbool16_t vm, vint64m1_t vd,
vint32m2_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m4_i64m1_tum(vbool8_t vm, vint64m1_t vd,
vint32m4_t vs2, vint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m8_i64m1_tum(vbool4_t vm, vint64m1_t vd,
vint32m8_t vs2, vint64m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t vm, vuint16m1_t vd,
vuint8mf8_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
vuint8mf4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
vuint8mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t vm, vuint16m1_t vd,
vuint8m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t vm, vuint16m1_t vd,
vuint8m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t vm, vuint16m1_t vd,
vuint8m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t vm, vuint16m1_t vd,
vuint8m8_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
vuint16mf4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
vuint16mf2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t vm, vuint32m1_t vd,
vuint16m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t vm, vuint32m1_t vd,
vuint16m2_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t vm, vuint32m1_t vd,
vuint16m4_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t vm, vuint32m1_t vd,
vuint16m8_t vs2,
vuint32m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint32mf2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t vm, vuint64m1_t vd,
vuint32m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t vm, vuint64m1_t vd,
vuint32m2_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t vm, vuint64m1_t vd,
vuint32m4_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t vm, vuint64m1_t vd,
vuint32m8_t vs2,
vuint64m1_t vs1, size_t vl);
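
// Example (illustrative sketch, not part of the listing): accumulate u16
// samples into a u32 total while skipping a sentinel value, using the masked
// (_tum) widening reduction above. The 0xFFFF sentinel and the helper
// intrinsics are assumptions made for illustration only.
static inline uint32_t sum_valid_samples(const uint16_t *s, size_t n) {
  vuint32m1_t acc = __riscv_vmv_s_x_u32m1(0, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m2(n - i);
    vuint16m2_t v = __riscv_vle16_v_u16m2(&s[i], vl);
    // u16m2 has SEW/LMUL = 8, so the matching mask type is vbool8_t.
    vbool8_t keep = __riscv_vmsne_vx_u16m2_b8(v, 0xFFFF, vl); // drop sentinels
    acc = __riscv_vwredsumu_vs_u16m2_u32m1_tum(keep, acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vmv_x_s_u32m1_u32(acc);
}
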
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
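
// Example (illustrative sketch, not part of the listing): running maximum of a
// float array with vfredmax (_tu form) from the group above. Seeding the
// accumulator with -INFINITY (from <math.h>) makes the empty prefix behave as
// the identity; the helper intrinsics come from elsewhere in this API.
static inline float max_of_f32(const float *x, size_t n) {
  vfloat32m1_t acc = __riscv_vfmv_s_f_f32m1(-INFINITY, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - i);
    vfloat32m8_t v = __riscv_vle32_v_f32m8(&x[i], vl);
    // Element 0 of the result is max(vs1[0], max of the vl active elements).
    acc = __riscv_vfredmax_vs_f32m8_f32m1_tu(acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vfmv_f_s_f32m1_f32(acc);
}
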
// masked functions
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t vm,
vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t vm,
vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t vm,
vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t vm,
vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t vm,
vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t vm,
vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1, size_t vl);
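
// Example (illustrative sketch, not part of the listing): add up only the
// strictly positive entries of a float array with the masked (_tum) unordered
// sum vfredusum listed above. The "positive only" criterion and the helper
// intrinsics are illustrative assumptions.
static inline float sum_of_positive(const float *x, size_t n) {
  vfloat32m1_t acc = __riscv_vfmv_s_f_f32m1(0.0f, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m4(n - i);
    vfloat32m4_t v = __riscv_vle32_v_f32m4(&x[i], vl);
    vbool8_t pos = __riscv_vmfgt_vf_f32m4_b8(v, 0.0f, vl);
    // The unordered sum (usum) adds elements in an implementation-defined
    // order, which is faster but not bit-reproducible across implementations.
    acc = __riscv_vfredusum_vs_f32m4_f32m1_tum(pos, acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vfmv_f_s_f32m1_f32(acc);
}
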
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16mf4_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16mf2_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m1_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m2_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m4_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd,
vfloat16m8_t vs2,
vfloat16m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32mf2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m1_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd,
vfloat32m8_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m1_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m4_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd,
vfloat64m8_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
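
// Example (illustrative sketch, not part of the listing): ordered sum of a
// double array with an explicit rounding mode via the _rm_ variants above,
// which apply `frm` only for the duration of the operation instead of relying
// on the ambient CSR. __RISCV_FRM_RDN is assumed to be the round-down
// enumerator defined by the intrinsics spec (frm = 2); rounding every partial
// add toward -infinity yields a lower bound on the exact sum.
static inline double sum_f64_round_down(const double *x, size_t n) {
  vfloat64m1_t acc = __riscv_vfmv_s_f_f64m1(0.0, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e64m4(n - i);
    vfloat64m4_t v = __riscv_vle64_v_f64m4(&x[i], vl);
    // Ordered (osum) adds elements in index order, so the result is
    // reproducible for a given input and rounding mode.
    acc = __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(acc, v, acc,
                                                 __RISCV_FRM_RDN, vl);
    i += vl;
  }
  return __riscv_vfmv_f_s_f64m1_f64(acc);
}
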
// masked functions
vfloat16m1_t
__riscv_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd,
vfloat16mf4_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd,
vfloat16mf2_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat32mf2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd,
vfloat16mf4_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd,
vfloat16mf2_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd,
vfloat16m2_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd,
vfloat16m4_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat16m1_t
__riscv_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd,
vfloat16m8_t vs2, vfloat16m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat32mf2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat32m2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat32m4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat32m8_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat64m2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat64m4_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat64m8_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1, size_t vl);
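
// Example (illustrative sketch, not part of the listing): accumulate float
// data in double precision with the widening reduction vfwredusum (_tu form)
// above, so the running sum is kept at f64 precision throughout. The helper
// intrinsics come from elsewhere in this API.
static inline double sum_f32_in_f64(const float *x, size_t n) {
  vfloat64m1_t acc = __riscv_vfmv_s_f_f64m1(0.0, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m4(n - i);
    vfloat32m4_t v = __riscv_vle32_v_f32m4(&x[i], vl);
    // Each f32 element is widened to f64 before being folded into vs1[0].
    acc = __riscv_vfwredusum_vs_f32m4_f64m1_tu(acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vfmv_f_s_f64m1_f64(acc);
}
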
// masked functions
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t vm,
vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t vm,
vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t vm,
vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t vm,
vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t vm,
vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t vm,
vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t vm,
vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t vm,
vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t vm,
vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t vm,
vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t vm,
vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t vm,
vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16mf4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16mf2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m1_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m2_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m4_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd,
vfloat16m8_t vs2,
vfloat32m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32mf2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m1_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m2_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m4_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd,
vfloat32m8_t vs2,
vfloat64m1_t vs1,
unsigned int frm,
size_t vl);
// masked functions
vfloat32m1_t
__riscv_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat16mf4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat16mf2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat16m1_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat16m2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat16m4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd,
vfloat16m8_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat32mf2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat32m1_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat32m2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat32m4_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd,
vfloat32m8_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd,
vfloat16mf4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
vfloat16mf2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd,
vfloat16m1_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd,
vfloat16m2_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd,
vfloat16m4_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat32m1_t
__riscv_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd,
vfloat16m8_t vs2, vfloat32m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd,
vfloat32mf2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd,
vfloat32m1_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd,
vfloat32m2_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd,
vfloat32m4_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
vfloat64m1_t
__riscv_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd,
vfloat32m8_t vs2, vfloat64m1_t vs1,
unsigned int frm, size_t vl);
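
// Example (illustrative sketch, not part of the listing): the most general
// call shape in this section -- masked, widening, ordered sum with an explicit
// rounding mode (_rm_tum). The mask drops NaN inputs (x == x is false only for
// NaN); __RISCV_FRM_RNE is assumed to be the round-to-nearest-even enumerator
// from the intrinsics spec, and the helper intrinsics are assumptions made
// for illustration.
static inline double sum_f32_skip_nan(const float *x, size_t n) {
  vfloat64m1_t acc = __riscv_vfmv_s_f_f64m1(0.0, 1);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m2(n - i);
    vfloat32m2_t v = __riscv_vle32_v_f32m2(&x[i], vl);
    // f32m2 has SEW/LMUL = 16, so the mask operand is vbool16_t.
    vbool16_t notnan = __riscv_vmfeq_vv_f32m2_b16(v, v, vl);
    acc = __riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(notnan, acc, v, acc,
                                                   __RISCV_FRM_RNE, vl);
    i += vl;
  }
  return __riscv_vfmv_f_s_f64m1_f64(acc);
}
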