_Float16 __riscv_vfmv_f_s_f16mf4_f16(vfloat16mf4_t vs1);
vfloat16mf4_t __riscv_vfmv_s_f_f16mf4(_Float16 rs1, size_t vl);
_Float16 __riscv_vfmv_f_s_f16mf2_f16(vfloat16mf2_t vs1);
vfloat16mf2_t __riscv_vfmv_s_f_f16mf2(_Float16 rs1, size_t vl);
_Float16 __riscv_vfmv_f_s_f16m1_f16(vfloat16m1_t vs1);
vfloat16m1_t __riscv_vfmv_s_f_f16m1(_Float16 rs1, size_t vl);
_Float16 __riscv_vfmv_f_s_f16m2_f16(vfloat16m2_t vs1);
vfloat16m2_t __riscv_vfmv_s_f_f16m2(_Float16 rs1, size_t vl);
_Float16 __riscv_vfmv_f_s_f16m4_f16(vfloat16m4_t vs1);
vfloat16m4_t __riscv_vfmv_s_f_f16m4(_Float16 rs1, size_t vl);
_Float16 __riscv_vfmv_f_s_f16m8_f16(vfloat16m8_t vs1);
vfloat16m8_t __riscv_vfmv_s_f_f16m8(_Float16 rs1, size_t vl);
float __riscv_vfmv_f_s_f32mf2_f32(vfloat32mf2_t vs1);
vfloat32mf2_t __riscv_vfmv_s_f_f32mf2(float rs1, size_t vl);
float __riscv_vfmv_f_s_f32m1_f32(vfloat32m1_t vs1);
vfloat32m1_t __riscv_vfmv_s_f_f32m1(float rs1, size_t vl);
float __riscv_vfmv_f_s_f32m2_f32(vfloat32m2_t vs1);
vfloat32m2_t __riscv_vfmv_s_f_f32m2(float rs1, size_t vl);
float __riscv_vfmv_f_s_f32m4_f32(vfloat32m4_t vs1);
vfloat32m4_t __riscv_vfmv_s_f_f32m4(float rs1, size_t vl);
float __riscv_vfmv_f_s_f32m8_f32(vfloat32m8_t vs1);
vfloat32m8_t __riscv_vfmv_s_f_f32m8(float rs1, size_t vl);
double __riscv_vfmv_f_s_f64m1_f64(vfloat64m1_t vs1);
vfloat64m1_t __riscv_vfmv_s_f_f64m1(double rs1, size_t vl);
double __riscv_vfmv_f_s_f64m2_f64(vfloat64m2_t vs1);
vfloat64m2_t __riscv_vfmv_s_f_f64m2(double rs1, size_t vl);
double __riscv_vfmv_f_s_f64m4_f64(vfloat64m4_t vs1);
vfloat64m4_t __riscv_vfmv_s_f_f64m4(double rs1, size_t vl);
double __riscv_vfmv_f_s_f64m8_f64(vfloat64m8_t vs1);
vfloat64m8_t __riscv_vfmv_s_f_f64m8(double rs1, size_t vl);
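// Example (hedged sketch, not part of the prototype listing): vfmv.s.f
// writes the scalar into element 0 of the result and vfmv.f.s reads
// element 0 back; elements past index 0 of the vfmv.s.f result follow the
// default tail policy, so only element 0 should be relied upon.
#include <riscv_vector.h>

static inline float vfmv_round_trip_f32m1(float x, size_t vl) {
  vfloat32m1_t v = __riscv_vfmv_s_f_f32m1(x, vl); // element 0 := x
  return __riscv_vfmv_f_s_f32m1_f32(v);           // read element 0
}
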
int8_t __riscv_vmv_x_s_i8mf8_i8(vint8mf8_t vs1);
vint8mf8_t __riscv_vmv_s_x_i8mf8(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8mf4_i8(vint8mf4_t vs1);
vint8mf4_t __riscv_vmv_s_x_i8mf4(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8mf2_i8(vint8mf2_t vs1);
vint8mf2_t __riscv_vmv_s_x_i8mf2(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8m1_i8(vint8m1_t vs1);
vint8m1_t __riscv_vmv_s_x_i8m1(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8m2_i8(vint8m2_t vs1);
vint8m2_t __riscv_vmv_s_x_i8m2(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8m4_i8(vint8m4_t vs1);
vint8m4_t __riscv_vmv_s_x_i8m4(int8_t rs1, size_t vl);
int8_t __riscv_vmv_x_s_i8m8_i8(vint8m8_t vs1);
vint8m8_t __riscv_vmv_s_x_i8m8(int8_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16mf4_i16(vint16mf4_t vs1);
vint16mf4_t __riscv_vmv_s_x_i16mf4(int16_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16mf2_i16(vint16mf2_t vs1);
vint16mf2_t __riscv_vmv_s_x_i16mf2(int16_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16m1_i16(vint16m1_t vs1);
vint16m1_t __riscv_vmv_s_x_i16m1(int16_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16m2_i16(vint16m2_t vs1);
vint16m2_t __riscv_vmv_s_x_i16m2(int16_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16m4_i16(vint16m4_t vs1);
vint16m4_t __riscv_vmv_s_x_i16m4(int16_t rs1, size_t vl);
int16_t __riscv_vmv_x_s_i16m8_i16(vint16m8_t vs1);
vint16m8_t __riscv_vmv_s_x_i16m8(int16_t rs1, size_t vl);
int32_t __riscv_vmv_x_s_i32mf2_i32(vint32mf2_t vs1);
vint32mf2_t __riscv_vmv_s_x_i32mf2(int32_t rs1, size_t vl);
int32_t __riscv_vmv_x_s_i32m1_i32(vint32m1_t vs1);
vint32m1_t __riscv_vmv_s_x_i32m1(int32_t rs1, size_t vl);
int32_t __riscv_vmv_x_s_i32m2_i32(vint32m2_t vs1);
vint32m2_t __riscv_vmv_s_x_i32m2(int32_t rs1, size_t vl);
int32_t __riscv_vmv_x_s_i32m4_i32(vint32m4_t vs1);
vint32m4_t __riscv_vmv_s_x_i32m4(int32_t rs1, size_t vl);
int32_t __riscv_vmv_x_s_i32m8_i32(vint32m8_t vs1);
vint32m8_t __riscv_vmv_s_x_i32m8(int32_t rs1, size_t vl);
int64_t __riscv_vmv_x_s_i64m1_i64(vint64m1_t vs1);
vint64m1_t __riscv_vmv_s_x_i64m1(int64_t rs1, size_t vl);
int64_t __riscv_vmv_x_s_i64m2_i64(vint64m2_t vs1);
vint64m2_t __riscv_vmv_s_x_i64m2(int64_t rs1, size_t vl);
int64_t __riscv_vmv_x_s_i64m4_i64(vint64m4_t vs1);
vint64m4_t __riscv_vmv_s_x_i64m4(int64_t rs1, size_t vl);
int64_t __riscv_vmv_x_s_i64m8_i64(vint64m8_t vs1);
vint64m8_t __riscv_vmv_s_x_i64m8(int64_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8mf8_u8(vuint8mf8_t vs1);
vuint8mf8_t __riscv_vmv_s_x_u8mf8(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8mf4_u8(vuint8mf4_t vs1);
vuint8mf4_t __riscv_vmv_s_x_u8mf4(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8mf2_u8(vuint8mf2_t vs1);
vuint8mf2_t __riscv_vmv_s_x_u8mf2(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8m1_u8(vuint8m1_t vs1);
vuint8m1_t __riscv_vmv_s_x_u8m1(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8m2_u8(vuint8m2_t vs1);
vuint8m2_t __riscv_vmv_s_x_u8m2(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8m4_u8(vuint8m4_t vs1);
vuint8m4_t __riscv_vmv_s_x_u8m4(uint8_t rs1, size_t vl);
uint8_t __riscv_vmv_x_s_u8m8_u8(vuint8m8_t vs1);
vuint8m8_t __riscv_vmv_s_x_u8m8(uint8_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16mf4_u16(vuint16mf4_t vs1);
vuint16mf4_t __riscv_vmv_s_x_u16mf4(uint16_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16mf2_u16(vuint16mf2_t vs1);
vuint16mf2_t __riscv_vmv_s_x_u16mf2(uint16_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16m1_u16(vuint16m1_t vs1);
vuint16m1_t __riscv_vmv_s_x_u16m1(uint16_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16m2_u16(vuint16m2_t vs1);
vuint16m2_t __riscv_vmv_s_x_u16m2(uint16_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16m4_u16(vuint16m4_t vs1);
vuint16m4_t __riscv_vmv_s_x_u16m4(uint16_t rs1, size_t vl);
uint16_t __riscv_vmv_x_s_u16m8_u16(vuint16m8_t vs1);
vuint16m8_t __riscv_vmv_s_x_u16m8(uint16_t rs1, size_t vl);
uint32_t __riscv_vmv_x_s_u32mf2_u32(vuint32mf2_t vs1);
vuint32mf2_t __riscv_vmv_s_x_u32mf2(uint32_t rs1, size_t vl);
uint32_t __riscv_vmv_x_s_u32m1_u32(vuint32m1_t vs1);
vuint32m1_t __riscv_vmv_s_x_u32m1(uint32_t rs1, size_t vl);
uint32_t __riscv_vmv_x_s_u32m2_u32(vuint32m2_t vs1);
vuint32m2_t __riscv_vmv_s_x_u32m2(uint32_t rs1, size_t vl);
uint32_t __riscv_vmv_x_s_u32m4_u32(vuint32m4_t vs1);
vuint32m4_t __riscv_vmv_s_x_u32m4(uint32_t rs1, size_t vl);
uint32_t __riscv_vmv_x_s_u32m8_u32(vuint32m8_t vs1);
vuint32m8_t __riscv_vmv_s_x_u32m8(uint32_t rs1, size_t vl);
uint64_t __riscv_vmv_x_s_u64m1_u64(vuint64m1_t vs1);
vuint64m1_t __riscv_vmv_s_x_u64m1(uint64_t rs1, size_t vl);
uint64_t __riscv_vmv_x_s_u64m2_u64(vuint64m2_t vs1);
vuint64m2_t __riscv_vmv_s_x_u64m2(uint64_t rs1, size_t vl);
uint64_t __riscv_vmv_x_s_u64m4_u64(vuint64m4_t vs1);
vuint64m4_t __riscv_vmv_s_x_u64m4(uint64_t rs1, size_t vl);
uint64_t __riscv_vmv_x_s_u64m8_u64(vuint64m8_t vs1);
vuint64m8_t __riscv_vmv_s_x_u64m8(uint64_t rs1, size_t vl);
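// Example (hedged sketch): vmv.x.s typically reads back the scalar result
// of a reduction, which lands in element 0. __riscv_vsetvl_e32m1,
// __riscv_vle32_v_i32m1, and __riscv_vredsum_vs_i32m1_i32m1 are assumed
// from other sections of this document.
#include <riscv_vector.h>

static inline int32_t sum_i32(const int32_t *a, size_t n) {
  vint32m1_t acc = __riscv_vmv_s_x_i32m1(0, 1); // seed element 0 with 0
  for (size_t vl; n > 0; n -= vl, a += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vint32m1_t v = __riscv_vle32_v_i32m1(a, vl);
    acc = __riscv_vredsum_vs_i32m1_i32m1(v, acc, vl); // acc[0] += sum(v)
  }
  return __riscv_vmv_x_s_i32m1_i32(acc); // element 0 holds the total
}
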
vfloat16mf4_t __riscv_vslideup_vx_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2,
size_t rs1, size_t vl);
vfloat16mf2_t __riscv_vslideup_vx_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2,
size_t rs1, size_t vl);
vfloat16m1_t __riscv_vslideup_vx_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2,
size_t rs1, size_t vl);
vfloat16m2_t __riscv_vslideup_vx_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2,
size_t rs1, size_t vl);
vfloat16m4_t __riscv_vslideup_vx_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2,
size_t rs1, size_t vl);
vfloat16m8_t __riscv_vslideup_vx_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2,
size_t rs1, size_t vl);
vfloat32mf2_t __riscv_vslideup_vx_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2,
size_t rs1, size_t vl);
vfloat32m1_t __riscv_vslideup_vx_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2,
size_t rs1, size_t vl);
vfloat32m2_t __riscv_vslideup_vx_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2,
size_t rs1, size_t vl);
vfloat32m4_t __riscv_vslideup_vx_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2,
size_t rs1, size_t vl);
vfloat32m8_t __riscv_vslideup_vx_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2,
size_t rs1, size_t vl);
vfloat64m1_t __riscv_vslideup_vx_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2,
size_t rs1, size_t vl);
vfloat64m2_t __riscv_vslideup_vx_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2,
size_t rs1, size_t vl);
vfloat64m4_t __riscv_vslideup_vx_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2,
size_t rs1, size_t vl);
vfloat64m8_t __riscv_vslideup_vx_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2,
size_t rs1, size_t vl);
vint8mf8_t __riscv_vslideup_vx_i8mf8(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1,
size_t vl);
vint8mf4_t __riscv_vslideup_vx_i8mf4(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1,
size_t vl);
vint8mf2_t __riscv_vslideup_vx_i8mf2(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1,
size_t vl);
vint8m1_t __riscv_vslideup_vx_i8m1(vint8m1_t vd, vint8m1_t vs2, size_t rs1,
size_t vl);
vint8m2_t __riscv_vslideup_vx_i8m2(vint8m2_t vd, vint8m2_t vs2, size_t rs1,
size_t vl);
vint8m4_t __riscv_vslideup_vx_i8m4(vint8m4_t vd, vint8m4_t vs2, size_t rs1,
size_t vl);
vint8m8_t __riscv_vslideup_vx_i8m8(vint8m8_t vd, vint8m8_t vs2, size_t rs1,
size_t vl);
vint16mf4_t __riscv_vslideup_vx_i16mf4(vint16mf4_t vd, vint16mf4_t vs2,
size_t rs1, size_t vl);
vint16mf2_t __riscv_vslideup_vx_i16mf2(vint16mf2_t vd, vint16mf2_t vs2,
size_t rs1, size_t vl);
vint16m1_t __riscv_vslideup_vx_i16m1(vint16m1_t vd, vint16m1_t vs2, size_t rs1,
size_t vl);
vint16m2_t __riscv_vslideup_vx_i16m2(vint16m2_t vd, vint16m2_t vs2, size_t rs1,
size_t vl);
vint16m4_t __riscv_vslideup_vx_i16m4(vint16m4_t vd, vint16m4_t vs2, size_t rs1,
size_t vl);
vint16m8_t __riscv_vslideup_vx_i16m8(vint16m8_t vd, vint16m8_t vs2, size_t rs1,
size_t vl);
vint32mf2_t __riscv_vslideup_vx_i32mf2(vint32mf2_t vd, vint32mf2_t vs2,
size_t rs1, size_t vl);
vint32m1_t __riscv_vslideup_vx_i32m1(vint32m1_t vd, vint32m1_t vs2, size_t rs1,
size_t vl);
vint32m2_t __riscv_vslideup_vx_i32m2(vint32m2_t vd, vint32m2_t vs2, size_t rs1,
size_t vl);
vint32m4_t __riscv_vslideup_vx_i32m4(vint32m4_t vd, vint32m4_t vs2, size_t rs1,
size_t vl);
vint32m8_t __riscv_vslideup_vx_i32m8(vint32m8_t vd, vint32m8_t vs2, size_t rs1,
size_t vl);
vint64m1_t __riscv_vslideup_vx_i64m1(vint64m1_t vd, vint64m1_t vs2, size_t rs1,
size_t vl);
vint64m2_t __riscv_vslideup_vx_i64m2(vint64m2_t vd, vint64m2_t vs2, size_t rs1,
size_t vl);
vint64m4_t __riscv_vslideup_vx_i64m4(vint64m4_t vd, vint64m4_t vs2, size_t rs1,
size_t vl);
vint64m8_t __riscv_vslideup_vx_i64m8(vint64m8_t vd, vint64m8_t vs2, size_t rs1,
size_t vl);
vuint8mf8_t __riscv_vslideup_vx_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2,
size_t rs1, size_t vl);
vuint8mf4_t __riscv_vslideup_vx_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2,
size_t rs1, size_t vl);
vuint8mf2_t __riscv_vslideup_vx_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2,
size_t rs1, size_t vl);
vuint8m1_t __riscv_vslideup_vx_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1,
size_t vl);
vuint8m2_t __riscv_vslideup_vx_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1,
size_t vl);
vuint8m4_t __riscv_vslideup_vx_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1,
size_t vl);
vuint8m8_t __riscv_vslideup_vx_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1,
size_t vl);
vuint16mf4_t __riscv_vslideup_vx_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2,
size_t rs1, size_t vl);
vuint16mf2_t __riscv_vslideup_vx_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2,
size_t rs1, size_t vl);
vuint16m1_t __riscv_vslideup_vx_u16m1(vuint16m1_t vd, vuint16m1_t vs2,
size_t rs1, size_t vl);
vuint16m2_t __riscv_vslideup_vx_u16m2(vuint16m2_t vd, vuint16m2_t vs2,
size_t rs1, size_t vl);
vuint16m4_t __riscv_vslideup_vx_u16m4(vuint16m4_t vd, vuint16m4_t vs2,
size_t rs1, size_t vl);
vuint16m8_t __riscv_vslideup_vx_u16m8(vuint16m8_t vd, vuint16m8_t vs2,
size_t rs1, size_t vl);
vuint32mf2_t __riscv_vslideup_vx_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
size_t rs1, size_t vl);
vuint32m1_t __riscv_vslideup_vx_u32m1(vuint32m1_t vd, vuint32m1_t vs2,
size_t rs1, size_t vl);
vuint32m2_t __riscv_vslideup_vx_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
size_t rs1, size_t vl);
vuint32m4_t __riscv_vslideup_vx_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
size_t rs1, size_t vl);
vuint32m8_t __riscv_vslideup_vx_u32m8(vuint32m8_t vd, vuint32m8_t vs2,
size_t rs1, size_t vl);
vuint64m1_t __riscv_vslideup_vx_u64m1(vuint64m1_t vd, vuint64m1_t vs2,
size_t rs1, size_t vl);
vuint64m2_t __riscv_vslideup_vx_u64m2(vuint64m2_t vd, vuint64m2_t vs2,
size_t rs1, size_t vl);
vuint64m4_t __riscv_vslideup_vx_u64m4(vuint64m4_t vd, vuint64m4_t vs2,
size_t rs1, size_t vl);
vuint64m8_t __riscv_vslideup_vx_u64m8(vuint64m8_t vd, vuint64m8_t vs2,
size_t rs1, size_t vl);
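// Example (hedged sketch): vslideup leaves elements [0, rs1) of vd
// untouched and writes vs2 shifted up by rs1 above them, so it can splice
// two vectors at a given offset.
#include <riscv_vector.h>

static inline vint32m1_t splice_i32m1(vint32m1_t lo, vint32m1_t hi,
                                      size_t offset, size_t vl) {
  // result[i] = lo[i] for i < offset, hi[i - offset] for offset <= i < vl
  return __riscv_vslideup_vx_i32m1(lo, hi, offset, vl);
}
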
// masked functions (vslideup)
vfloat16mf4_t __riscv_vslideup_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd,
vfloat16mf4_t vs2, size_t rs1,
size_t vl);
vfloat16mf2_t __riscv_vslideup_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd,
vfloat16mf2_t vs2, size_t rs1,
size_t vl);
vfloat16m1_t __riscv_vslideup_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vd,
vfloat16m1_t vs2, size_t rs1,
size_t vl);
vfloat16m2_t __riscv_vslideup_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vd,
vfloat16m2_t vs2, size_t rs1,
size_t vl);
vfloat16m4_t __riscv_vslideup_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vd,
vfloat16m4_t vs2, size_t rs1,
size_t vl);
vfloat16m8_t __riscv_vslideup_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vd,
vfloat16m8_t vs2, size_t rs1,
size_t vl);
vfloat32mf2_t __riscv_vslideup_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
vfloat32mf2_t vs2, size_t rs1,
size_t vl);
vfloat32m1_t __riscv_vslideup_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
vfloat32m1_t vs2, size_t rs1,
size_t vl);
vfloat32m2_t __riscv_vslideup_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
vfloat32m2_t vs2, size_t rs1,
size_t vl);
vfloat32m4_t __riscv_vslideup_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
vfloat32m4_t vs2, size_t rs1,
size_t vl);
vfloat32m8_t __riscv_vslideup_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
vfloat32m8_t vs2, size_t rs1,
size_t vl);
vfloat64m1_t __riscv_vslideup_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vd,
vfloat64m1_t vs2, size_t rs1,
size_t vl);
vfloat64m2_t __riscv_vslideup_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vd,
vfloat64m2_t vs2, size_t rs1,
size_t vl);
vfloat64m4_t __riscv_vslideup_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vd,
vfloat64m4_t vs2, size_t rs1,
size_t vl);
vfloat64m8_t __riscv_vslideup_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vd,
vfloat64m8_t vs2, size_t rs1,
size_t vl);
vint8mf8_t __riscv_vslideup_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd,
vint8mf8_t vs2, size_t rs1, size_t vl);
vint8mf4_t __riscv_vslideup_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd,
vint8mf4_t vs2, size_t rs1, size_t vl);
vint8mf2_t __riscv_vslideup_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd,
vint8mf2_t vs2, size_t rs1, size_t vl);
vint8m1_t __riscv_vslideup_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
size_t rs1, size_t vl);
vint8m2_t __riscv_vslideup_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
size_t rs1, size_t vl);
vint8m4_t __riscv_vslideup_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
size_t rs1, size_t vl);
vint8m8_t __riscv_vslideup_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
size_t rs1, size_t vl);
vint16mf4_t __riscv_vslideup_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd,
vint16mf4_t vs2, size_t rs1,
size_t vl);
vint16mf2_t __riscv_vslideup_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd,
vint16mf2_t vs2, size_t rs1,
size_t vl);
vint16m1_t __riscv_vslideup_vx_i16m1_m(vbool16_t vm, vint16m1_t vd,
vint16m1_t vs2, size_t rs1, size_t vl);
vint16m2_t __riscv_vslideup_vx_i16m2_m(vbool8_t vm, vint16m2_t vd,
vint16m2_t vs2, size_t rs1, size_t vl);
vint16m4_t __riscv_vslideup_vx_i16m4_m(vbool4_t vm, vint16m4_t vd,
vint16m4_t vs2, size_t rs1, size_t vl);
vint16m8_t __riscv_vslideup_vx_i16m8_m(vbool2_t vm, vint16m8_t vd,
vint16m8_t vs2, size_t rs1, size_t vl);
vint32mf2_t __riscv_vslideup_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
vint32mf2_t vs2, size_t rs1,
size_t vl);
vint32m1_t __riscv_vslideup_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
vint32m1_t vs2, size_t rs1, size_t vl);
vint32m2_t __riscv_vslideup_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
vint32m2_t vs2, size_t rs1, size_t vl);
vint32m4_t __riscv_vslideup_vx_i32m4_m(vbool8_t vm, vint32m4_t vd,
vint32m4_t vs2, size_t rs1, size_t vl);
vint32m8_t __riscv_vslideup_vx_i32m8_m(vbool4_t vm, vint32m8_t vd,
vint32m8_t vs2, size_t rs1, size_t vl);
vint64m1_t __riscv_vslideup_vx_i64m1_m(vbool64_t vm, vint64m1_t vd,
vint64m1_t vs2, size_t rs1, size_t vl);
vint64m2_t __riscv_vslideup_vx_i64m2_m(vbool32_t vm, vint64m2_t vd,
vint64m2_t vs2, size_t rs1, size_t vl);
vint64m4_t __riscv_vslideup_vx_i64m4_m(vbool16_t vm, vint64m4_t vd,
vint64m4_t vs2, size_t rs1, size_t vl);
vint64m8_t __riscv_vslideup_vx_i64m8_m(vbool8_t vm, vint64m8_t vd,
vint64m8_t vs2, size_t rs1, size_t vl);
vuint8mf8_t __riscv_vslideup_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd,
vuint8mf8_t vs2, size_t rs1, size_t vl);
vuint8mf4_t __riscv_vslideup_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd,
vuint8mf4_t vs2, size_t rs1, size_t vl);
vuint8mf2_t __riscv_vslideup_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd,
vuint8mf2_t vs2, size_t rs1, size_t vl);
vuint8m1_t __riscv_vslideup_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd,
vuint8m1_t vs2, size_t rs1, size_t vl);
vuint8m2_t __riscv_vslideup_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd,
vuint8m2_t vs2, size_t rs1, size_t vl);
vuint8m4_t __riscv_vslideup_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd,
vuint8m4_t vs2, size_t rs1, size_t vl);
vuint8m8_t __riscv_vslideup_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd,
vuint8m8_t vs2, size_t rs1, size_t vl);
vuint16mf4_t __riscv_vslideup_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd,
vuint16mf4_t vs2, size_t rs1,
size_t vl);
vuint16mf2_t __riscv_vslideup_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd,
vuint16mf2_t vs2, size_t rs1,
size_t vl);
vuint16m1_t __riscv_vslideup_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd,
vuint16m1_t vs2, size_t rs1, size_t vl);
vuint16m2_t __riscv_vslideup_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd,
vuint16m2_t vs2, size_t rs1, size_t vl);
vuint16m4_t __riscv_vslideup_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd,
vuint16m4_t vs2, size_t rs1, size_t vl);
vuint16m8_t __riscv_vslideup_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd,
vuint16m8_t vs2, size_t rs1, size_t vl);
vuint32mf2_t __riscv_vslideup_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
vuint32mf2_t vs2, size_t rs1,
size_t vl);
vuint32m1_t __riscv_vslideup_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd,
vuint32m1_t vs2, size_t rs1, size_t vl);
vuint32m2_t __riscv_vslideup_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd,
vuint32m2_t vs2, size_t rs1, size_t vl);
vuint32m4_t __riscv_vslideup_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd,
vuint32m4_t vs2, size_t rs1, size_t vl);
vuint32m8_t __riscv_vslideup_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd,
vuint32m8_t vs2, size_t rs1, size_t vl);
vuint64m1_t __riscv_vslideup_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd,
vuint64m1_t vs2, size_t rs1, size_t vl);
vuint64m2_t __riscv_vslideup_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd,
vuint64m2_t vs2, size_t rs1, size_t vl);
vuint64m4_t __riscv_vslideup_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd,
vuint64m4_t vs2, size_t rs1, size_t vl);
vuint64m8_t __riscv_vslideup_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd,
vuint64m8_t vs2, size_t rs1, size_t vl);
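// Example (hedged sketch): the masked forms take the mask first, and only
// active elements receive slid-up data. The masked-off element policy of
// the plain _m form is not restated here; the policy variants (_tum/_mu,
// outside this section) make it explicit. __riscv_vmsgt_vx_i32m1_b32 is
// assumed from the integer-compare section of this document.
#include <riscv_vector.h>

static inline vint32m1_t slideup_positives(vint32m1_t v, size_t vl) {
  vbool32_t vm = __riscv_vmsgt_vx_i32m1_b32(v, 0, vl); // active: v[i] > 0
  return __riscv_vslideup_vx_i32m1_m(vm, v, v, 1, vl); // slide only there
}
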
vfloat16mf4_t __riscv_vslidedown_vx_f16mf4(vfloat16mf4_t vs2, size_t rs1,
size_t vl);
vfloat16mf2_t __riscv_vslidedown_vx_f16mf2(vfloat16mf2_t vs2, size_t rs1,
size_t vl);
vfloat16m1_t __riscv_vslidedown_vx_f16m1(vfloat16m1_t vs2, size_t rs1,
size_t vl);
vfloat16m2_t __riscv_vslidedown_vx_f16m2(vfloat16m2_t vs2, size_t rs1,
size_t vl);
vfloat16m4_t __riscv_vslidedown_vx_f16m4(vfloat16m4_t vs2, size_t rs1,
size_t vl);
vfloat16m8_t __riscv_vslidedown_vx_f16m8(vfloat16m8_t vs2, size_t rs1,
size_t vl);
vfloat32mf2_t __riscv_vslidedown_vx_f32mf2(vfloat32mf2_t vs2, size_t rs1,
size_t vl);
vfloat32m1_t __riscv_vslidedown_vx_f32m1(vfloat32m1_t vs2, size_t rs1,
size_t vl);
vfloat32m2_t __riscv_vslidedown_vx_f32m2(vfloat32m2_t vs2, size_t rs1,
size_t vl);
vfloat32m4_t __riscv_vslidedown_vx_f32m4(vfloat32m4_t vs2, size_t rs1,
size_t vl);
vfloat32m8_t __riscv_vslidedown_vx_f32m8(vfloat32m8_t vs2, size_t rs1,
size_t vl);
vfloat64m1_t __riscv_vslidedown_vx_f64m1(vfloat64m1_t vs2, size_t rs1,
size_t vl);
vfloat64m2_t __riscv_vslidedown_vx_f64m2(vfloat64m2_t vs2, size_t rs1,
size_t vl);
vfloat64m4_t __riscv_vslidedown_vx_f64m4(vfloat64m4_t vs2, size_t rs1,
size_t vl);
vfloat64m8_t __riscv_vslidedown_vx_f64m8(vfloat64m8_t vs2, size_t rs1,
size_t vl);
vint8mf8_t __riscv_vslidedown_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl);
vint8mf4_t __riscv_vslidedown_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl);
vint8mf2_t __riscv_vslidedown_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl);
vint8m1_t __riscv_vslidedown_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl);
vint8m2_t __riscv_vslidedown_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl);
vint8m4_t __riscv_vslidedown_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl);
vint8m8_t __riscv_vslidedown_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl);
vint16mf4_t __riscv_vslidedown_vx_i16mf4(vint16mf4_t vs2, size_t rs1,
size_t vl);
vint16mf2_t __riscv_vslidedown_vx_i16mf2(vint16mf2_t vs2, size_t rs1,
size_t vl);
vint16m1_t __riscv_vslidedown_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl);
vint16m2_t __riscv_vslidedown_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl);
vint16m4_t __riscv_vslidedown_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl);
vint16m8_t __riscv_vslidedown_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl);
vint32mf2_t __riscv_vslidedown_vx_i32mf2(vint32mf2_t vs2, size_t rs1,
size_t vl);
vint32m1_t __riscv_vslidedown_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl);
vint32m2_t __riscv_vslidedown_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl);
vint32m4_t __riscv_vslidedown_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl);
vint32m8_t __riscv_vslidedown_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl);
vint64m1_t __riscv_vslidedown_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl);
vint64m2_t __riscv_vslidedown_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl);
vint64m4_t __riscv_vslidedown_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl);
vint64m8_t __riscv_vslidedown_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl);
vuint8mf8_t __riscv_vslidedown_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl);
vuint8mf4_t __riscv_vslidedown_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl);
vuint8mf2_t __riscv_vslidedown_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl);
vuint8m1_t __riscv_vslidedown_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl);
vuint8m2_t __riscv_vslidedown_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl);
vuint8m4_t __riscv_vslidedown_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl);
vuint8m8_t __riscv_vslidedown_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl);
vuint16mf4_t __riscv_vslidedown_vx_u16mf4(vuint16mf4_t vs2, size_t rs1,
size_t vl);
vuint16mf2_t __riscv_vslidedown_vx_u16mf2(vuint16mf2_t vs2, size_t rs1,
size_t vl);
vuint16m1_t __riscv_vslidedown_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl);
vuint16m2_t __riscv_vslidedown_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl);
vuint16m4_t __riscv_vslidedown_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl);
vuint16m8_t __riscv_vslidedown_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl);
vuint32mf2_t __riscv_vslidedown_vx_u32mf2(vuint32mf2_t vs2, size_t rs1,
size_t vl);
vuint32m1_t __riscv_vslidedown_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl);
vuint32m2_t __riscv_vslidedown_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl);
vuint32m4_t __riscv_vslidedown_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl);
vuint32m8_t __riscv_vslidedown_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl);
vuint64m1_t __riscv_vslidedown_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl);
vuint64m2_t __riscv_vslidedown_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl);
vuint64m4_t __riscv_vslidedown_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl);
vuint64m8_t __riscv_vslidedown_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl);
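// Example (hedged sketch): a common idiom extracts element i of a vector
// by sliding down by i and reading element 0 with vfmv.f.s (declared at
// the top of this section).
#include <riscv_vector.h>

static inline float get_elem_f32m1(vfloat32m1_t v, size_t i, size_t vl) {
  vfloat32m1_t t = __riscv_vslidedown_vx_f32m1(v, i, vl); // t[0] = v[i]
  return __riscv_vfmv_f_s_f32m1_f32(t);
}
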
// masked functions (vslidedown)
vfloat16mf4_t __riscv_vslidedown_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
size_t rs1, size_t vl);
vfloat16mf2_t __riscv_vslidedown_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
size_t rs1, size_t vl);
vfloat16m1_t __riscv_vslidedown_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
size_t rs1, size_t vl);
vfloat16m2_t __riscv_vslidedown_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
size_t rs1, size_t vl);
vfloat16m4_t __riscv_vslidedown_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
size_t rs1, size_t vl);
vfloat16m8_t __riscv_vslidedown_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
size_t rs1, size_t vl);
vfloat32mf2_t __riscv_vslidedown_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t rs1, size_t vl);
vfloat32m1_t __riscv_vslidedown_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
size_t rs1, size_t vl);
vfloat32m2_t __riscv_vslidedown_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t rs1, size_t vl);
vfloat32m4_t __riscv_vslidedown_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
size_t rs1, size_t vl);
vfloat32m8_t __riscv_vslidedown_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
size_t rs1, size_t vl);
vfloat64m1_t __riscv_vslidedown_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
size_t rs1, size_t vl);
vfloat64m2_t __riscv_vslidedown_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
size_t rs1, size_t vl);
vfloat64m4_t __riscv_vslidedown_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
size_t rs1, size_t vl);
vfloat64m8_t __riscv_vslidedown_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
size_t rs1, size_t vl);
vint8mf8_t __riscv_vslidedown_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2,
size_t rs1, size_t vl);
vint8mf4_t __riscv_vslidedown_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2,
size_t rs1, size_t vl);
vint8mf2_t __riscv_vslidedown_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2,
size_t rs1, size_t vl);
vint8m1_t __riscv_vslidedown_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1,
size_t vl);
vint8m2_t __riscv_vslidedown_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1,
size_t vl);
vint8m4_t __riscv_vslidedown_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1,
size_t vl);
vint8m8_t __riscv_vslidedown_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1,
size_t vl);
vint16mf4_t __riscv_vslidedown_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
size_t rs1, size_t vl);
vint16mf2_t __riscv_vslidedown_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
size_t rs1, size_t vl);
vint16m1_t __riscv_vslidedown_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2,
size_t rs1, size_t vl);
vint16m2_t __riscv_vslidedown_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2,
size_t rs1, size_t vl);
vint16m4_t __riscv_vslidedown_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2,
size_t rs1, size_t vl);
vint16m8_t __riscv_vslidedown_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2,
size_t rs1, size_t vl);
vint32mf2_t __riscv_vslidedown_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
size_t rs1, size_t vl);
vint32m1_t __riscv_vslidedown_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2,
size_t rs1, size_t vl);
vint32m2_t __riscv_vslidedown_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2,
size_t rs1, size_t vl);
vint32m4_t __riscv_vslidedown_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2,
size_t rs1, size_t vl);
vint32m8_t __riscv_vslidedown_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2,
size_t rs1, size_t vl);
vint64m1_t __riscv_vslidedown_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2,
size_t rs1, size_t vl);
vint64m2_t __riscv_vslidedown_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2,
size_t rs1, size_t vl);
vint64m4_t __riscv_vslidedown_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2,
size_t rs1, size_t vl);
vint64m8_t __riscv_vslidedown_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2,
size_t rs1, size_t vl);
vuint8mf8_t __riscv_vslidedown_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
size_t rs1, size_t vl);
vuint8mf4_t __riscv_vslidedown_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
size_t rs1, size_t vl);
vuint8mf2_t __riscv_vslidedown_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
size_t rs1, size_t vl);
vuint8m1_t __riscv_vslidedown_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1,
size_t vl);
vuint8m2_t __riscv_vslidedown_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1,
size_t vl);
vuint8m4_t __riscv_vslidedown_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1,
size_t vl);
vuint8m8_t __riscv_vslidedown_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1,
size_t vl);
vuint16mf4_t __riscv_vslidedown_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
size_t rs1, size_t vl);
vuint16mf2_t __riscv_vslidedown_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
size_t rs1, size_t vl);
vuint16m1_t __riscv_vslidedown_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
size_t rs1, size_t vl);
vuint16m2_t __riscv_vslidedown_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
size_t rs1, size_t vl);
vuint16m4_t __riscv_vslidedown_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
size_t rs1, size_t vl);
vuint16m8_t __riscv_vslidedown_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
size_t rs1, size_t vl);
vuint32mf2_t __riscv_vslidedown_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
size_t rs1, size_t vl);
vuint32m1_t __riscv_vslidedown_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
size_t rs1, size_t vl);
vuint32m2_t __riscv_vslidedown_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
size_t rs1, size_t vl);
vuint32m4_t __riscv_vslidedown_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
size_t rs1, size_t vl);
vuint32m8_t __riscv_vslidedown_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
size_t rs1, size_t vl);
vuint64m1_t __riscv_vslidedown_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
size_t rs1, size_t vl);
vuint64m2_t __riscv_vslidedown_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
size_t rs1, size_t vl);
vuint64m4_t __riscv_vslidedown_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
size_t rs1, size_t vl);
vuint64m8_t __riscv_vslidedown_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
size_t rs1, size_t vl);
vfloat16mf4_t __riscv_vfslide1up_vf_f16mf4(vfloat16mf4_t vs2, _Float16 rs1,
size_t vl);
vfloat16mf2_t __riscv_vfslide1up_vf_f16mf2(vfloat16mf2_t vs2, _Float16 rs1,
size_t vl);
vfloat16m1_t __riscv_vfslide1up_vf_f16m1(vfloat16m1_t vs2, _Float16 rs1,
size_t vl);
vfloat16m2_t __riscv_vfslide1up_vf_f16m2(vfloat16m2_t vs2, _Float16 rs1,
size_t vl);
vfloat16m4_t __riscv_vfslide1up_vf_f16m4(vfloat16m4_t vs2, _Float16 rs1,
size_t vl);
vfloat16m8_t __riscv_vfslide1up_vf_f16m8(vfloat16m8_t vs2, _Float16 rs1,
size_t vl);
vfloat32mf2_t __riscv_vfslide1up_vf_f32mf2(vfloat32mf2_t vs2, float rs1,
size_t vl);
vfloat32m1_t __riscv_vfslide1up_vf_f32m1(vfloat32m1_t vs2, float rs1,
size_t vl);
vfloat32m2_t __riscv_vfslide1up_vf_f32m2(vfloat32m2_t vs2, float rs1,
size_t vl);
vfloat32m4_t __riscv_vfslide1up_vf_f32m4(vfloat32m4_t vs2, float rs1,
size_t vl);
vfloat32m8_t __riscv_vfslide1up_vf_f32m8(vfloat32m8_t vs2, float rs1,
size_t vl);
vfloat64m1_t __riscv_vfslide1up_vf_f64m1(vfloat64m1_t vs2, double rs1,
size_t vl);
vfloat64m2_t __riscv_vfslide1up_vf_f64m2(vfloat64m2_t vs2, double rs1,
size_t vl);
vfloat64m4_t __riscv_vfslide1up_vf_f64m4(vfloat64m4_t vs2, double rs1,
size_t vl);
vfloat64m8_t __riscv_vfslide1up_vf_f64m8(vfloat64m8_t vs2, double rs1,
size_t vl);
vfloat16mf4_t __riscv_vfslide1down_vf_f16mf4(vfloat16mf4_t vs2, _Float16 rs1,
size_t vl);
vfloat16mf2_t __riscv_vfslide1down_vf_f16mf2(vfloat16mf2_t vs2, _Float16 rs1,
size_t vl);
vfloat16m1_t __riscv_vfslide1down_vf_f16m1(vfloat16m1_t vs2, _Float16 rs1,
size_t vl);
vfloat16m2_t __riscv_vfslide1down_vf_f16m2(vfloat16m2_t vs2, _Float16 rs1,
size_t vl);
vfloat16m4_t __riscv_vfslide1down_vf_f16m4(vfloat16m4_t vs2, _Float16 rs1,
size_t vl);
vfloat16m8_t __riscv_vfslide1down_vf_f16m8(vfloat16m8_t vs2, _Float16 rs1,
size_t vl);
vfloat32mf2_t __riscv_vfslide1down_vf_f32mf2(vfloat32mf2_t vs2, float rs1,
size_t vl);
vfloat32m1_t __riscv_vfslide1down_vf_f32m1(vfloat32m1_t vs2, float rs1,
size_t vl);
vfloat32m2_t __riscv_vfslide1down_vf_f32m2(vfloat32m2_t vs2, float rs1,
size_t vl);
vfloat32m4_t __riscv_vfslide1down_vf_f32m4(vfloat32m4_t vs2, float rs1,
size_t vl);
vfloat32m8_t __riscv_vfslide1down_vf_f32m8(vfloat32m8_t vs2, float rs1,
size_t vl);
vfloat64m1_t __riscv_vfslide1down_vf_f64m1(vfloat64m1_t vs2, double rs1,
size_t vl);
vfloat64m2_t __riscv_vfslide1down_vf_f64m2(vfloat64m2_t vs2, double rs1,
size_t vl);
vfloat64m4_t __riscv_vfslide1down_vf_f64m4(vfloat64m4_t vs2, double rs1,
size_t vl);
vfloat64m8_t __riscv_vfslide1down_vf_f64m8(vfloat64m8_t vs2, double rs1,
size_t vl);
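// Example (hedged sketch): vfslide1up/vfslide1down insert a scalar into
// the vacated end position, which suits 3-point stencils across a strip.
// __riscv_vfadd_vv_f32m1 is assumed from the floating-point arithmetic
// section of this document.
#include <riscv_vector.h>

static inline vfloat32m1_t stencil3_f32m1(vfloat32m1_t v, float prev,
                                          float next, size_t vl) {
  vfloat32m1_t up = __riscv_vfslide1up_vf_f32m1(v, prev, vl);   // up[0] = prev, up[i] = v[i-1]
  vfloat32m1_t dn = __riscv_vfslide1down_vf_f32m1(v, next, vl); // dn[vl-1] = next, dn[i] = v[i+1]
  vfloat32m1_t s = __riscv_vfadd_vv_f32m1(up, dn, vl);
  return __riscv_vfadd_vv_f32m1(s, v, vl); // v[i-1] + v[i] + v[i+1]
}
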
vint8mf8_t __riscv_vslide1up_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vslide1up_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vslide1up_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vslide1up_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vslide1up_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vslide1up_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vslide1up_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vslide1up_vx_i16mf4(vint16mf4_t vs2, int16_t rs1,
size_t vl);
vint16mf2_t __riscv_vslide1up_vx_i16mf2(vint16mf2_t vs2, int16_t rs1,
size_t vl);
vint16m1_t __riscv_vslide1up_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vslide1up_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vslide1up_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vslide1up_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vslide1up_vx_i32mf2(vint32mf2_t vs2, int32_t rs1,
size_t vl);
vint32m1_t __riscv_vslide1up_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vslide1up_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vslide1up_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vslide1up_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vslide1up_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vslide1up_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vslide1up_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vslide1up_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl);
vint8mf8_t __riscv_vslide1down_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vslide1down_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vslide1down_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vslide1down_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vslide1down_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vslide1down_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vslide1down_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vslide1down_vx_i16mf4(vint16mf4_t vs2, int16_t rs1,
size_t vl);
vint16mf2_t __riscv_vslide1down_vx_i16mf2(vint16mf2_t vs2, int16_t rs1,
size_t vl);
vint16m1_t __riscv_vslide1down_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vslide1down_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vslide1down_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vslide1down_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vslide1down_vx_i32mf2(vint32mf2_t vs2, int32_t rs1,
size_t vl);
vint32m1_t __riscv_vslide1down_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vslide1down_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vslide1down_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vslide1down_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vslide1down_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vslide1down_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vslide1down_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vslide1down_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl);
vuint8mf8_t __riscv_vslide1up_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl);
vuint8mf4_t __riscv_vslide1up_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl);
vuint8mf2_t __riscv_vslide1up_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl);
vuint8m1_t __riscv_vslide1up_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vslide1up_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vslide1up_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vslide1up_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vslide1up_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1,
size_t vl);
vuint16mf2_t __riscv_vslide1up_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1,
size_t vl);
vuint16m1_t __riscv_vslide1up_vx_u16m1(vuint16m1_t vs2, uint16_t rs1,
size_t vl);
vuint16m2_t __riscv_vslide1up_vx_u16m2(vuint16m2_t vs2, uint16_t rs1,
size_t vl);
vuint16m4_t __riscv_vslide1up_vx_u16m4(vuint16m4_t vs2, uint16_t rs1,
size_t vl);
vuint16m8_t __riscv_vslide1up_vx_u16m8(vuint16m8_t vs2, uint16_t rs1,
size_t vl);
vuint32mf2_t __riscv_vslide1up_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1,
size_t vl);
vuint32m1_t __riscv_vslide1up_vx_u32m1(vuint32m1_t vs2, uint32_t rs1,
size_t vl);
vuint32m2_t __riscv_vslide1up_vx_u32m2(vuint32m2_t vs2, uint32_t rs1,
size_t vl);
vuint32m4_t __riscv_vslide1up_vx_u32m4(vuint32m4_t vs2, uint32_t rs1,
size_t vl);
vuint32m8_t __riscv_vslide1up_vx_u32m8(vuint32m8_t vs2, uint32_t rs1,
size_t vl);
vuint64m1_t __riscv_vslide1up_vx_u64m1(vuint64m1_t vs2, uint64_t rs1,
size_t vl);
vuint64m2_t __riscv_vslide1up_vx_u64m2(vuint64m2_t vs2, uint64_t rs1,
size_t vl);
vuint64m4_t __riscv_vslide1up_vx_u64m4(vuint64m4_t vs2, uint64_t rs1,
size_t vl);
vuint64m8_t __riscv_vslide1up_vx_u64m8(vuint64m8_t vs2, uint64_t rs1,
size_t vl);
vuint8mf8_t __riscv_vslide1down_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1,
size_t vl);
vuint8mf4_t __riscv_vslide1down_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1,
size_t vl);
vuint8mf2_t __riscv_vslide1down_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1,
size_t vl);
vuint8m1_t __riscv_vslide1down_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vslide1down_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vslide1down_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vslide1down_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vslide1down_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1,
size_t vl);
vuint16mf2_t __riscv_vslide1down_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1,
size_t vl);
vuint16m1_t __riscv_vslide1down_vx_u16m1(vuint16m1_t vs2, uint16_t rs1,
size_t vl);
vuint16m2_t __riscv_vslide1down_vx_u16m2(vuint16m2_t vs2, uint16_t rs1,
size_t vl);
vuint16m4_t __riscv_vslide1down_vx_u16m4(vuint16m4_t vs2, uint16_t rs1,
size_t vl);
vuint16m8_t __riscv_vslide1down_vx_u16m8(vuint16m8_t vs2, uint16_t rs1,
size_t vl);
vuint32mf2_t __riscv_vslide1down_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1,
size_t vl);
vuint32m1_t __riscv_vslide1down_vx_u32m1(vuint32m1_t vs2, uint32_t rs1,
size_t vl);
vuint32m2_t __riscv_vslide1down_vx_u32m2(vuint32m2_t vs2, uint32_t rs1,
size_t vl);
vuint32m4_t __riscv_vslide1down_vx_u32m4(vuint32m4_t vs2, uint32_t rs1,
size_t vl);
vuint32m8_t __riscv_vslide1down_vx_u32m8(vuint32m8_t vs2, uint32_t rs1,
size_t vl);
vuint64m1_t __riscv_vslide1down_vx_u64m1(vuint64m1_t vs2, uint64_t rs1,
size_t vl);
vuint64m2_t __riscv_vslide1down_vx_u64m2(vuint64m2_t vs2, uint64_t rs1,
size_t vl);
vuint64m4_t __riscv_vslide1down_vx_u64m4(vuint64m4_t vs2, uint64_t rs1,
size_t vl);
vuint64m8_t __riscv_vslide1down_vx_u64m8(vuint64m8_t vs2, uint64_t rs1,
size_t vl);
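// Example (hedged sketch): vslide1down doubles as a scalar-in shift
// register; each call moves the window left by one element and inserts
// the newest sample at index vl-1.
#include <riscv_vector.h>

static inline vint16m1_t push_sample_i16m1(vint16m1_t window,
                                           int16_t sample, size_t vl) {
  return __riscv_vslide1down_vx_i16m1(window, sample, vl);
}
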
// masked functions (vslide1up/vslide1down and vfslide1up/vfslide1down)
vfloat16mf4_t __riscv_vfslide1up_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
_Float16 rs1, size_t vl);
vfloat16mf2_t __riscv_vfslide1up_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
_Float16 rs1, size_t vl);
vfloat16m1_t __riscv_vfslide1up_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
_Float16 rs1, size_t vl);
vfloat16m2_t __riscv_vfslide1up_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
_Float16 rs1, size_t vl);
vfloat16m4_t __riscv_vfslide1up_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
_Float16 rs1, size_t vl);
vfloat16m8_t __riscv_vfslide1up_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
_Float16 rs1, size_t vl);
vfloat32mf2_t __riscv_vfslide1up_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
float rs1, size_t vl);
vfloat32m1_t __riscv_vfslide1up_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
float rs1, size_t vl);
vfloat32m2_t __riscv_vfslide1up_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
float rs1, size_t vl);
vfloat32m4_t __riscv_vfslide1up_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
float rs1, size_t vl);
vfloat32m8_t __riscv_vfslide1up_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
float rs1, size_t vl);
vfloat64m1_t __riscv_vfslide1up_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
double rs1, size_t vl);
vfloat64m2_t __riscv_vfslide1up_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
double rs1, size_t vl);
vfloat64m4_t __riscv_vfslide1up_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
double rs1, size_t vl);
vfloat64m8_t __riscv_vfslide1up_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
double rs1, size_t vl);
vfloat16mf4_t __riscv_vfslide1down_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
_Float16 rs1, size_t vl);
vfloat16mf2_t __riscv_vfslide1down_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
_Float16 rs1, size_t vl);
vfloat16m1_t __riscv_vfslide1down_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
_Float16 rs1, size_t vl);
vfloat16m2_t __riscv_vfslide1down_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
_Float16 rs1, size_t vl);
vfloat16m4_t __riscv_vfslide1down_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
_Float16 rs1, size_t vl);
vfloat16m8_t __riscv_vfslide1down_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
_Float16 rs1, size_t vl);
vfloat32mf2_t __riscv_vfslide1down_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
float rs1, size_t vl);
vfloat32m1_t __riscv_vfslide1down_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
float rs1, size_t vl);
vfloat32m2_t __riscv_vfslide1down_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
float rs1, size_t vl);
vfloat32m4_t __riscv_vfslide1down_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
float rs1, size_t vl);
vfloat32m8_t __riscv_vfslide1down_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
float rs1, size_t vl);
vfloat64m1_t __riscv_vfslide1down_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
double rs1, size_t vl);
vfloat64m2_t __riscv_vfslide1down_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
double rs1, size_t vl);
vfloat64m4_t __riscv_vfslide1down_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
double rs1, size_t vl);
vfloat64m8_t __riscv_vfslide1down_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
double rs1, size_t vl);
vint8mf8_t __riscv_vslide1up_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2,
int8_t rs1, size_t vl);
vint8mf4_t __riscv_vslide1up_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2,
int8_t rs1, size_t vl);
vint8mf2_t __riscv_vslide1up_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2,
int8_t rs1, size_t vl);
vint8m1_t __riscv_vslide1up_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1,
size_t vl);
vint8m2_t __riscv_vslide1up_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1,
size_t vl);
vint8m4_t __riscv_vslide1up_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1,
size_t vl);
vint8m8_t __riscv_vslide1up_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1,
size_t vl);
vint16mf4_t __riscv_vslide1up_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
int16_t rs1, size_t vl);
vint16mf2_t __riscv_vslide1up_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
int16_t rs1, size_t vl);
vint16m1_t __riscv_vslide1up_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2,
int16_t rs1, size_t vl);
vint16m2_t __riscv_vslide1up_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2,
int16_t rs1, size_t vl);
vint16m4_t __riscv_vslide1up_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2,
int16_t rs1, size_t vl);
vint16m8_t __riscv_vslide1up_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2,
int16_t rs1, size_t vl);
vint32mf2_t __riscv_vslide1up_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
int32_t rs1, size_t vl);
vint32m1_t __riscv_vslide1up_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2,
int32_t rs1, size_t vl);
vint32m2_t __riscv_vslide1up_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2,
int32_t rs1, size_t vl);
vint32m4_t __riscv_vslide1up_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2,
int32_t rs1, size_t vl);
vint32m8_t __riscv_vslide1up_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2,
int32_t rs1, size_t vl);
vint64m1_t __riscv_vslide1up_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2,
int64_t rs1, size_t vl);
vint64m2_t __riscv_vslide1up_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2,
int64_t rs1, size_t vl);
vint64m4_t __riscv_vslide1up_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2,
int64_t rs1, size_t vl);
vint64m8_t __riscv_vslide1up_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2,
int64_t rs1, size_t vl);
vint8mf8_t __riscv_vslide1down_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2,
int8_t rs1, size_t vl);
vint8mf4_t __riscv_vslide1down_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2,
int8_t rs1, size_t vl);
vint8mf2_t __riscv_vslide1down_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2,
int8_t rs1, size_t vl);
vint8m1_t __riscv_vslide1down_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1,
size_t vl);
vint8m2_t __riscv_vslide1down_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1,
size_t vl);
vint8m4_t __riscv_vslide1down_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1,
size_t vl);
vint8m8_t __riscv_vslide1down_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1,
size_t vl);
vint16mf4_t __riscv_vslide1down_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
int16_t rs1, size_t vl);
vint16mf2_t __riscv_vslide1down_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
int16_t rs1, size_t vl);
vint16m1_t __riscv_vslide1down_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2,
int16_t rs1, size_t vl);
vint16m2_t __riscv_vslide1down_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2,
int16_t rs1, size_t vl);
vint16m4_t __riscv_vslide1down_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2,
int16_t rs1, size_t vl);
vint16m8_t __riscv_vslide1down_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2,
int16_t rs1, size_t vl);
vint32mf2_t __riscv_vslide1down_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
int32_t rs1, size_t vl);
vint32m1_t __riscv_vslide1down_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2,
int32_t rs1, size_t vl);
vint32m2_t __riscv_vslide1down_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2,
int32_t rs1, size_t vl);
vint32m4_t __riscv_vslide1down_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2,
int32_t rs1, size_t vl);
vint32m8_t __riscv_vslide1down_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2,
int32_t rs1, size_t vl);
vint64m1_t __riscv_vslide1down_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2,
int64_t rs1, size_t vl);
vint64m2_t __riscv_vslide1down_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2,
int64_t rs1, size_t vl);
vint64m4_t __riscv_vslide1down_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2,
int64_t rs1, size_t vl);
vint64m8_t __riscv_vslide1down_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2,
int64_t rs1, size_t vl);
vuint8mf8_t __riscv_vslide1up_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
uint8_t rs1, size_t vl);
vuint8mf4_t __riscv_vslide1up_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
uint8_t rs1, size_t vl);
vuint8mf2_t __riscv_vslide1up_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
uint8_t rs1, size_t vl);
vuint8m1_t __riscv_vslide1up_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1,
size_t vl);
vuint8m2_t __riscv_vslide1up_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1,
size_t vl);
vuint8m4_t __riscv_vslide1up_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1,
size_t vl);
vuint8m8_t __riscv_vslide1up_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1,
size_t vl);
vuint16mf4_t __riscv_vslide1up_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
uint16_t rs1, size_t vl);
vuint16mf2_t __riscv_vslide1up_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
uint16_t rs1, size_t vl);
vuint16m1_t __riscv_vslide1up_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
uint16_t rs1, size_t vl);
vuint16m2_t __riscv_vslide1up_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
uint16_t rs1, size_t vl);
vuint16m4_t __riscv_vslide1up_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
uint16_t rs1, size_t vl);
vuint16m8_t __riscv_vslide1up_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
uint16_t rs1, size_t vl);
vuint32mf2_t __riscv_vslide1up_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
uint32_t rs1, size_t vl);
vuint32m1_t __riscv_vslide1up_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
uint32_t rs1, size_t vl);
vuint32m2_t __riscv_vslide1up_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
uint32_t rs1, size_t vl);
vuint32m4_t __riscv_vslide1up_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
uint32_t rs1, size_t vl);
vuint32m8_t __riscv_vslide1up_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
uint32_t rs1, size_t vl);
vuint64m1_t __riscv_vslide1up_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
uint64_t rs1, size_t vl);
vuint64m2_t __riscv_vslide1up_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
uint64_t rs1, size_t vl);
vuint64m4_t __riscv_vslide1up_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
uint64_t rs1, size_t vl);
vuint64m8_t __riscv_vslide1up_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
uint64_t rs1, size_t vl);
vuint8mf8_t __riscv_vslide1down_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
uint8_t rs1, size_t vl);
vuint8mf4_t __riscv_vslide1down_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
uint8_t rs1, size_t vl);
vuint8mf2_t __riscv_vslide1down_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
uint8_t rs1, size_t vl);
vuint8m1_t __riscv_vslide1down_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2,
uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vslide1down_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2,
uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vslide1down_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2,
uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vslide1down_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2,
uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vslide1down_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
uint16_t rs1, size_t vl);
vuint16mf2_t __riscv_vslide1down_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
uint16_t rs1, size_t vl);
vuint16m1_t __riscv_vslide1down_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
uint16_t rs1, size_t vl);
vuint16m2_t __riscv_vslide1down_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
uint16_t rs1, size_t vl);
vuint16m4_t __riscv_vslide1down_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
uint16_t rs1, size_t vl);
vuint16m8_t __riscv_vslide1down_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
uint16_t rs1, size_t vl);
vuint32mf2_t __riscv_vslide1down_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
uint32_t rs1, size_t vl);
vuint32m1_t __riscv_vslide1down_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
uint32_t rs1, size_t vl);
vuint32m2_t __riscv_vslide1down_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
uint32_t rs1, size_t vl);
vuint32m4_t __riscv_vslide1down_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
uint32_t rs1, size_t vl);
vuint32m8_t __riscv_vslide1down_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
uint32_t rs1, size_t vl);
vuint64m1_t __riscv_vslide1down_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
uint64_t rs1, size_t vl);
vuint64m2_t __riscv_vslide1down_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
uint64_t rs1, size_t vl);
vuint64m4_t __riscv_vslide1down_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
uint64_t rs1, size_t vl);
vuint64m8_t __riscv_vslide1down_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
uint64_t rs1, size_t vl);
vfloat16mf4_t __riscv_vrgather_vv_f16mf4(vfloat16mf4_t vs2, vuint16mf4_t vs1,
size_t vl);
vfloat16mf4_t __riscv_vrgather_vx_f16mf4(vfloat16mf4_t vs2, size_t vs1,
size_t vl);
vfloat16mf2_t __riscv_vrgather_vv_f16mf2(vfloat16mf2_t vs2, vuint16mf2_t vs1,
size_t vl);
vfloat16mf2_t __riscv_vrgather_vx_f16mf2(vfloat16mf2_t vs2, size_t vs1,
size_t vl);
vfloat16m1_t __riscv_vrgather_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vfloat16m1_t __riscv_vrgather_vx_f16m1(vfloat16m1_t vs2, size_t vs1, size_t vl);
vfloat16m2_t __riscv_vrgather_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vfloat16m2_t __riscv_vrgather_vx_f16m2(vfloat16m2_t vs2, size_t vs1, size_t vl);
vfloat16m4_t __riscv_vrgather_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vfloat16m4_t __riscv_vrgather_vx_f16m4(vfloat16m4_t vs2, size_t vs1, size_t vl);
vfloat16m8_t __riscv_vrgather_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vfloat16m8_t __riscv_vrgather_vx_f16m8(vfloat16m8_t vs2, size_t vs1, size_t vl);
vfloat32mf2_t __riscv_vrgather_vv_f32mf2(vfloat32mf2_t vs2, vuint32mf2_t vs1,
size_t vl);
vfloat32mf2_t __riscv_vrgather_vx_f32mf2(vfloat32mf2_t vs2, size_t vs1,
size_t vl);
vfloat32m1_t __riscv_vrgather_vv_f32m1(vfloat32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vfloat32m1_t __riscv_vrgather_vx_f32m1(vfloat32m1_t vs2, size_t vs1, size_t vl);
vfloat32m2_t __riscv_vrgather_vv_f32m2(vfloat32m2_t vs2, vuint32m2_t vs1,
size_t vl);
vfloat32m2_t __riscv_vrgather_vx_f32m2(vfloat32m2_t vs2, size_t vs1, size_t vl);
vfloat32m4_t __riscv_vrgather_vv_f32m4(vfloat32m4_t vs2, vuint32m4_t vs1,
size_t vl);
vfloat32m4_t __riscv_vrgather_vx_f32m4(vfloat32m4_t vs2, size_t vs1, size_t vl);
vfloat32m8_t __riscv_vrgather_vv_f32m8(vfloat32m8_t vs2, vuint32m8_t vs1,
size_t vl);
vfloat32m8_t __riscv_vrgather_vx_f32m8(vfloat32m8_t vs2, size_t vs1, size_t vl);
vfloat64m1_t __riscv_vrgather_vv_f64m1(vfloat64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vfloat64m1_t __riscv_vrgather_vx_f64m1(vfloat64m1_t vs2, size_t vs1, size_t vl);
vfloat64m2_t __riscv_vrgather_vv_f64m2(vfloat64m2_t vs2, vuint64m2_t vs1,
size_t vl);
vfloat64m2_t __riscv_vrgather_vx_f64m2(vfloat64m2_t vs2, size_t vs1, size_t vl);
vfloat64m4_t __riscv_vrgather_vv_f64m4(vfloat64m4_t vs2, vuint64m4_t vs1,
size_t vl);
vfloat64m4_t __riscv_vrgather_vx_f64m4(vfloat64m4_t vs2, size_t vs1, size_t vl);
vfloat64m8_t __riscv_vrgather_vv_f64m8(vfloat64m8_t vs2, vuint64m8_t vs1,
size_t vl);
vfloat64m8_t __riscv_vrgather_vx_f64m8(vfloat64m8_t vs2, size_t vs1, size_t vl);
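// Example (hedged sketch): vrgather.vv with a computed index vector
// reverses a register group; vrgather.vx instead broadcasts element vs1.
// __riscv_vid_v_u32m1 (produces 0,1,2,...) and __riscv_vrsub_vx_u32m1
// (computes rs1 - vs2) are assumed from other sections of this document.
#include <riscv_vector.h>

static inline vfloat32m1_t reverse_f32m1(vfloat32m1_t v, size_t vl) {
  vuint32m1_t idx = __riscv_vid_v_u32m1(vl);                 // 0 .. vl-1
  idx = __riscv_vrsub_vx_u32m1(idx, (uint32_t)(vl - 1), vl); // vl-1 .. 0
  return __riscv_vrgather_vv_f32m1(v, idx, vl);              // v[vl-1-i]
}
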
vfloat16mf4_t __riscv_vrgatherei16_vv_f16mf4(vfloat16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat16mf2_t __riscv_vrgatherei16_vv_f16mf2(vfloat16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vfloat16m1_t __riscv_vrgatherei16_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vfloat16m2_t __riscv_vrgatherei16_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vfloat16m4_t __riscv_vrgatherei16_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vfloat16m8_t __riscv_vrgatherei16_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vfloat32mf2_t __riscv_vrgatherei16_vv_f32mf2(vfloat32mf2_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat32m1_t __riscv_vrgatherei16_vv_f32m1(vfloat32m1_t vs2, vuint16mf2_t vs1,
size_t vl);
vfloat32m2_t __riscv_vrgatherei16_vv_f32m2(vfloat32m2_t vs2, vuint16m1_t vs1,
size_t vl);
vfloat32m4_t __riscv_vrgatherei16_vv_f32m4(vfloat32m4_t vs2, vuint16m2_t vs1,
size_t vl);
vfloat32m8_t __riscv_vrgatherei16_vv_f32m8(vfloat32m8_t vs2, vuint16m4_t vs1,
size_t vl);
vfloat64m1_t __riscv_vrgatherei16_vv_f64m1(vfloat64m1_t vs2, vuint16mf4_t vs1,
size_t vl);
vfloat64m2_t __riscv_vrgatherei16_vv_f64m2(vfloat64m2_t vs2, vuint16mf2_t vs1,
size_t vl);
vfloat64m4_t __riscv_vrgatherei16_vv_f64m4(vfloat64m4_t vs2, vuint16m1_t vs1,
size_t vl);
vfloat64m8_t __riscv_vrgatherei16_vv_f64m8(vfloat64m8_t vs2, vuint16m2_t vs1,
size_t vl);
vint8mf8_t __riscv_vrgather_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1,
size_t vl);
vint8mf8_t __riscv_vrgather_vx_i8mf8(vint8mf8_t vs2, size_t vs1, size_t vl);
vint8mf4_t __riscv_vrgather_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1,
size_t vl);
vint8mf4_t __riscv_vrgather_vx_i8mf4(vint8mf4_t vs2, size_t vs1, size_t vl);
vint8mf2_t __riscv_vrgather_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1,
size_t vl);
vint8mf2_t __riscv_vrgather_vx_i8mf2(vint8mf2_t vs2, size_t vs1, size_t vl);
vint8m1_t __riscv_vrgather_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vrgather_vx_i8m1(vint8m1_t vs2, size_t vs1, size_t vl);
vint8m2_t __riscv_vrgather_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl);
vint8m2_t __riscv_vrgather_vx_i8m2(vint8m2_t vs2, size_t vs1, size_t vl);
vint8m4_t __riscv_vrgather_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl);
vint8m4_t __riscv_vrgather_vx_i8m4(vint8m4_t vs2, size_t vs1, size_t vl);
vint8m8_t __riscv_vrgather_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl);
vint8m8_t __riscv_vrgather_vx_i8m8(vint8m8_t vs2, size_t vs1, size_t vl);
vint16mf4_t __riscv_vrgather_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1,
size_t vl);
vint16mf4_t __riscv_vrgather_vx_i16mf4(vint16mf4_t vs2, size_t vs1, size_t vl);
vint16mf2_t __riscv_vrgather_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1,
size_t vl);
vint16mf2_t __riscv_vrgather_vx_i16mf2(vint16mf2_t vs2, size_t vs1, size_t vl);
vint16m1_t __riscv_vrgather_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vint16m1_t __riscv_vrgather_vx_i16m1(vint16m1_t vs2, size_t vs1, size_t vl);
vint16m2_t __riscv_vrgather_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vint16m2_t __riscv_vrgather_vx_i16m2(vint16m2_t vs2, size_t vs1, size_t vl);
vint16m4_t __riscv_vrgather_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vint16m4_t __riscv_vrgather_vx_i16m4(vint16m4_t vs2, size_t vs1, size_t vl);
vint16m8_t __riscv_vrgather_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vint16m8_t __riscv_vrgather_vx_i16m8(vint16m8_t vs2, size_t vs1, size_t vl);
vint32mf2_t __riscv_vrgather_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1,
size_t vl);
vint32mf2_t __riscv_vrgather_vx_i32mf2(vint32mf2_t vs2, size_t vs1, size_t vl);
vint32m1_t __riscv_vrgather_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vint32m1_t __riscv_vrgather_vx_i32m1(vint32m1_t vs2, size_t vs1, size_t vl);
vint32m2_t __riscv_vrgather_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1,
size_t vl);
vint32m2_t __riscv_vrgather_vx_i32m2(vint32m2_t vs2, size_t vs1, size_t vl);
vint32m4_t __riscv_vrgather_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1,
size_t vl);
vint32m4_t __riscv_vrgather_vx_i32m4(vint32m4_t vs2, size_t vs1, size_t vl);
vint32m8_t __riscv_vrgather_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1,
size_t vl);
vint32m8_t __riscv_vrgather_vx_i32m8(vint32m8_t vs2, size_t vs1, size_t vl);
vint64m1_t __riscv_vrgather_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vint64m1_t __riscv_vrgather_vx_i64m1(vint64m1_t vs2, size_t vs1, size_t vl);
vint64m2_t __riscv_vrgather_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1,
size_t vl);
vint64m2_t __riscv_vrgather_vx_i64m2(vint64m2_t vs2, size_t vs1, size_t vl);
vint64m4_t __riscv_vrgather_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1,
size_t vl);
vint64m4_t __riscv_vrgather_vx_i64m4(vint64m4_t vs2, size_t vs1, size_t vl);
vint64m8_t __riscv_vrgather_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1,
size_t vl);
vint64m8_t __riscv_vrgather_vx_i64m8(vint64m8_t vs2, size_t vs1, size_t vl);
vint8mf8_t __riscv_vrgatherei16_vv_i8mf8(vint8mf8_t vs2, vuint16mf4_t vs1,
size_t vl);
vint8mf4_t __riscv_vrgatherei16_vv_i8mf4(vint8mf4_t vs2, vuint16mf2_t vs1,
size_t vl);
vint8mf2_t __riscv_vrgatherei16_vv_i8mf2(vint8mf2_t vs2, vuint16m1_t vs1,
size_t vl);
vint8m1_t __riscv_vrgatherei16_vv_i8m1(vint8m1_t vs2, vuint16m2_t vs1,
size_t vl);
vint8m2_t __riscv_vrgatherei16_vv_i8m2(vint8m2_t vs2, vuint16m4_t vs1,
size_t vl);
vint8m4_t __riscv_vrgatherei16_vv_i8m4(vint8m4_t vs2, vuint16m8_t vs1,
size_t vl);
vint16mf4_t __riscv_vrgatherei16_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1,
size_t vl);
vint16mf2_t __riscv_vrgatherei16_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1,
size_t vl);
vint16m1_t __riscv_vrgatherei16_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vint16m2_t __riscv_vrgatherei16_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vint16m4_t __riscv_vrgatherei16_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vint16m8_t __riscv_vrgatherei16_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vint32mf2_t __riscv_vrgatherei16_vv_i32mf2(vint32mf2_t vs2, vuint16mf4_t vs1,
size_t vl);
vint32m1_t __riscv_vrgatherei16_vv_i32m1(vint32m1_t vs2, vuint16mf2_t vs1,
size_t vl);
vint32m2_t __riscv_vrgatherei16_vv_i32m2(vint32m2_t vs2, vuint16m1_t vs1,
size_t vl);
vint32m4_t __riscv_vrgatherei16_vv_i32m4(vint32m4_t vs2, vuint16m2_t vs1,
size_t vl);
vint32m8_t __riscv_vrgatherei16_vv_i32m8(vint32m8_t vs2, vuint16m4_t vs1,
size_t vl);
vint64m1_t __riscv_vrgatherei16_vv_i64m1(vint64m1_t vs2, vuint16mf4_t vs1,
size_t vl);
vint64m2_t __riscv_vrgatherei16_vv_i64m2(vint64m2_t vs2, vuint16mf2_t vs1,
size_t vl);
vint64m4_t __riscv_vrgatherei16_vv_i64m4(vint64m4_t vs2, vuint16m1_t vs1,
size_t vl);
vint64m8_t __riscv_vrgatherei16_vv_i64m8(vint64m8_t vs2, vuint16m2_t vs1,
size_t vl);
vuint8mf8_t __riscv_vrgather_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1,
size_t vl);
vuint8mf8_t __riscv_vrgather_vx_u8mf8(vuint8mf8_t vs2, size_t vs1, size_t vl);
vuint8mf4_t __riscv_vrgather_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1,
size_t vl);
vuint8mf4_t __riscv_vrgather_vx_u8mf4(vuint8mf4_t vs2, size_t vs1, size_t vl);
vuint8mf2_t __riscv_vrgather_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1,
size_t vl);
vuint8mf2_t __riscv_vrgather_vx_u8mf2(vuint8mf2_t vs2, size_t vs1, size_t vl);
vuint8m1_t __riscv_vrgather_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vrgather_vx_u8m1(vuint8m1_t vs2, size_t vs1, size_t vl);
vuint8m2_t __riscv_vrgather_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
vuint8m2_t __riscv_vrgather_vx_u8m2(vuint8m2_t vs2, size_t vs1, size_t vl);
vuint8m4_t __riscv_vrgather_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
vuint8m4_t __riscv_vrgather_vx_u8m4(vuint8m4_t vs2, size_t vs1, size_t vl);
vuint8m8_t __riscv_vrgather_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
vuint8m8_t __riscv_vrgather_vx_u8m8(vuint8m8_t vs2, size_t vs1, size_t vl);
vuint16mf4_t __riscv_vrgather_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
size_t vl);
vuint16mf4_t __riscv_vrgather_vx_u16mf4(vuint16mf4_t vs2, size_t vs1,
size_t vl);
vuint16mf2_t __riscv_vrgather_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
size_t vl);
vuint16mf2_t __riscv_vrgather_vx_u16mf2(vuint16mf2_t vs2, size_t vs1,
size_t vl);
vuint16m1_t __riscv_vrgather_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m1_t __riscv_vrgather_vx_u16m1(vuint16m1_t vs2, size_t vs1, size_t vl);
vuint16m2_t __riscv_vrgather_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vuint16m2_t __riscv_vrgather_vx_u16m2(vuint16m2_t vs2, size_t vs1, size_t vl);
vuint16m4_t __riscv_vrgather_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vuint16m4_t __riscv_vrgather_vx_u16m4(vuint16m4_t vs2, size_t vs1, size_t vl);
vuint16m8_t __riscv_vrgather_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vuint16m8_t __riscv_vrgather_vx_u16m8(vuint16m8_t vs2, size_t vs1, size_t vl);
vuint32mf2_t __riscv_vrgather_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
size_t vl);
vuint32mf2_t __riscv_vrgather_vx_u32mf2(vuint32mf2_t vs2, size_t vs1,
size_t vl);
vuint32m1_t __riscv_vrgather_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1,
size_t vl);
vuint32m1_t __riscv_vrgather_vx_u32m1(vuint32m1_t vs2, size_t vs1, size_t vl);
vuint32m2_t __riscv_vrgather_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1,
size_t vl);
vuint32m2_t __riscv_vrgather_vx_u32m2(vuint32m2_t vs2, size_t vs1, size_t vl);
vuint32m4_t __riscv_vrgather_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1,
size_t vl);
vuint32m4_t __riscv_vrgather_vx_u32m4(vuint32m4_t vs2, size_t vs1, size_t vl);
vuint32m8_t __riscv_vrgather_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1,
size_t vl);
vuint32m8_t __riscv_vrgather_vx_u32m8(vuint32m8_t vs2, size_t vs1, size_t vl);
vuint64m1_t __riscv_vrgather_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1,
size_t vl);
vuint64m1_t __riscv_vrgather_vx_u64m1(vuint64m1_t vs2, size_t vs1, size_t vl);
vuint64m2_t __riscv_vrgather_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1,
size_t vl);
vuint64m2_t __riscv_vrgather_vx_u64m2(vuint64m2_t vs2, size_t vs1, size_t vl);
vuint64m4_t __riscv_vrgather_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1,
size_t vl);
vuint64m4_t __riscv_vrgather_vx_u64m4(vuint64m4_t vs2, size_t vs1, size_t vl);
vuint64m8_t __riscv_vrgather_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1,
size_t vl);
vuint64m8_t __riscv_vrgather_vx_u64m8(vuint64m8_t vs2, size_t vs1, size_t vl);
vuint8mf8_t __riscv_vrgatherei16_vv_u8mf8(vuint8mf8_t vs2, vuint16mf4_t vs1,
size_t vl);
vuint8mf4_t __riscv_vrgatherei16_vv_u8mf4(vuint8mf4_t vs2, vuint16mf2_t vs1,
size_t vl);
vuint8mf2_t __riscv_vrgatherei16_vv_u8mf2(vuint8mf2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint8m1_t __riscv_vrgatherei16_vv_u8m1(vuint8m1_t vs2, vuint16m2_t vs1,
size_t vl);
vuint8m2_t __riscv_vrgatherei16_vv_u8m2(vuint8m2_t vs2, vuint16m4_t vs1,
size_t vl);
vuint8m4_t __riscv_vrgatherei16_vv_u8m4(vuint8m4_t vs2, vuint16m8_t vs1,
size_t vl);
vuint16mf4_t __riscv_vrgatherei16_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
size_t vl);
vuint16mf2_t __riscv_vrgatherei16_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
size_t vl);
vuint16m1_t __riscv_vrgatherei16_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1,
size_t vl);
vuint16m2_t __riscv_vrgatherei16_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1,
size_t vl);
vuint16m4_t __riscv_vrgatherei16_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1,
size_t vl);
vuint16m8_t __riscv_vrgatherei16_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1,
size_t vl);
vuint32mf2_t __riscv_vrgatherei16_vv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1,
size_t vl);
vuint32m1_t __riscv_vrgatherei16_vv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1,
size_t vl);
vuint32m2_t __riscv_vrgatherei16_vv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1,
size_t vl);
vuint32m4_t __riscv_vrgatherei16_vv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1,
size_t vl);
vuint32m8_t __riscv_vrgatherei16_vv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1,
size_t vl);
vuint64m1_t __riscv_vrgatherei16_vv_u64m1(vuint64m1_t vs2, vuint16mf4_t vs1,
size_t vl);
vuint64m2_t __riscv_vrgatherei16_vv_u64m2(vuint64m2_t vs2, vuint16mf2_t vs1,
size_t vl);
vuint64m4_t __riscv_vrgatherei16_vv_u64m4(vuint64m4_t vs2, vuint16m1_t vs1,
size_t vl);
vuint64m8_t __riscv_vrgatherei16_vv_u64m8(vuint64m8_t vs2, vuint16m2_t vs1,
size_t vl);
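
// Usage sketch (illustrative, not part of the listed API): vrgather_vv
// performs a register-group permutation, dst[i] = vs2[vs1[i]], with
// out-of-range indices yielding 0. The helper below reverses a float
// buffer with it; the function name and strip-mining loop are this
// example's own. Assumes <riscv_vector.h> and a V-extension toolchain.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void reverse_f32(const float *src, float *dst, size_t n) {
  for (size_t avl = n; avl > 0;) {
    size_t vl = __riscv_vsetvl_e32m1(avl);
    vfloat32m1_t v = __riscv_vle32_v_f32m1(src + (n - avl), vl);
    // vid gives {0, 1, ..., vl-1}; rsub with vl-1 flips it to
    // {vl-1, ..., 1, 0}, the index pattern that reverses the strip.
    vuint32m1_t idx = __riscv_vrsub_vx_u32m1(__riscv_vid_v_u32m1(vl),
                                             (uint32_t)(vl - 1), vl);
    vfloat32m1_t rev = __riscv_vrgather_vv_f32m1(v, idx, vl);
    // Strips read from the front of src land at the back of dst.
    __riscv_vse32_v_f32m1(dst + (avl - vl), rev, vl);
    avl -= vl;
  }
}

// vrgatherei16_vv fixes the index EEW at 16 bits regardless of the data
// SEW, so 8-bit data can be gathered from register groups longer than
// 256 elements (which is also why no i8m8/u8m8 ei16 variant appears
// above: the index operand would need LMUL = 16). A single-strip byte
// permutation, again an illustrative sketch:
static void permute_u8(const uint8_t *src, const uint16_t *idx,
                       uint8_t *dst, size_t n) {
  size_t vl = __riscv_vsetvl_e8m2(n); // handles the first strip only
  vuint8m2_t v = __riscv_vle8_v_u8m2(src, vl);
  vuint16m4_t vi = __riscv_vle16_v_u16m4(idx, vl); // index EMUL doubles
  vuint8m2_t p = __riscv_vrgatherei16_vv_u8m2(v, vi, vl);
  __riscv_vse8_v_u8m2(dst, p, vl);
}
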
// masked functions
vfloat16mf4_t __riscv_vrgather_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat16mf4_t __riscv_vrgather_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
size_t vs1, size_t vl);
vfloat16mf2_t __riscv_vrgather_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vfloat16mf2_t __riscv_vrgather_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
size_t vs1, size_t vl);
vfloat16m1_t __riscv_vrgather_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vfloat16m1_t __riscv_vrgather_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
size_t vs1, size_t vl);
vfloat16m2_t __riscv_vrgather_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vfloat16m2_t __riscv_vrgather_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
size_t vs1, size_t vl);
vfloat16m4_t __riscv_vrgather_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vfloat16m4_t __riscv_vrgather_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
size_t vs1, size_t vl);
vfloat16m8_t __riscv_vrgather_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vfloat16m8_t __riscv_vrgather_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
size_t vs1, size_t vl);
vfloat32mf2_t __riscv_vrgather_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
vuint32mf2_t vs1, size_t vl);
vfloat32mf2_t __riscv_vrgather_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vs1, size_t vl);
vfloat32m1_t __riscv_vrgather_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vfloat32m1_t __riscv_vrgather_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vs1, size_t vl);
vfloat32m2_t __riscv_vrgather_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
vuint32m2_t vs1, size_t vl);
vfloat32m2_t __riscv_vrgather_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vs1, size_t vl);
vfloat32m4_t __riscv_vrgather_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
vuint32m4_t vs1, size_t vl);
vfloat32m4_t __riscv_vrgather_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vs1, size_t vl);
vfloat32m8_t __riscv_vrgather_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
vuint32m8_t vs1, size_t vl);
vfloat32m8_t __riscv_vrgather_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vs1, size_t vl);
vfloat64m1_t __riscv_vrgather_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vfloat64m1_t __riscv_vrgather_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
size_t vs1, size_t vl);
vfloat64m2_t __riscv_vrgather_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
vuint64m2_t vs1, size_t vl);
vfloat64m2_t __riscv_vrgather_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
size_t vs1, size_t vl);
vfloat64m4_t __riscv_vrgather_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
vuint64m4_t vs1, size_t vl);
vfloat64m4_t __riscv_vrgather_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
size_t vs1, size_t vl);
vfloat64m8_t __riscv_vrgather_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
vuint64m8_t vs1, size_t vl);
vfloat64m8_t __riscv_vrgather_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
size_t vs1, size_t vl);
vfloat16mf4_t __riscv_vrgatherei16_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat16mf2_t __riscv_vrgatherei16_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vfloat16m1_t __riscv_vrgatherei16_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vfloat16m2_t __riscv_vrgatherei16_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vfloat16m4_t __riscv_vrgatherei16_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vfloat16m8_t __riscv_vrgatherei16_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vfloat32mf2_t __riscv_vrgatherei16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat32m1_t __riscv_vrgatherei16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
vuint16mf2_t vs1, size_t vl);
vfloat32m2_t __riscv_vrgatherei16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
vuint16m1_t vs1, size_t vl);
vfloat32m4_t __riscv_vrgatherei16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
vuint16m2_t vs1, size_t vl);
vfloat32m8_t __riscv_vrgatherei16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
vuint16m4_t vs1, size_t vl);
vfloat64m1_t __riscv_vrgatherei16_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
vuint16mf4_t vs1, size_t vl);
vfloat64m2_t __riscv_vrgatherei16_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
vuint16mf2_t vs1, size_t vl);
vfloat64m4_t __riscv_vrgatherei16_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
vuint16m1_t vs1, size_t vl);
vfloat64m8_t __riscv_vrgatherei16_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
vuint16m2_t vs1, size_t vl);
vint8mf8_t __riscv_vrgather_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2,
vuint8mf8_t vs1, size_t vl);
vint8mf8_t __riscv_vrgather_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t vs1,
size_t vl);
vint8mf4_t __riscv_vrgather_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2,
vuint8mf4_t vs1, size_t vl);
vint8mf4_t __riscv_vrgather_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t vs1,
size_t vl);
vint8mf2_t __riscv_vrgather_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2,
vuint8mf2_t vs1, size_t vl);
vint8mf2_t __riscv_vrgather_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t vs1,
size_t vl);
vint8m1_t __riscv_vrgather_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1,
size_t vl);
vint8m1_t __riscv_vrgather_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t vs1,
size_t vl);
vint8m2_t __riscv_vrgather_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1,
size_t vl);
vint8m2_t __riscv_vrgather_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t vs1,
size_t vl);
vint8m4_t __riscv_vrgather_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1,
size_t vl);
vint8m4_t __riscv_vrgather_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t vs1,
size_t vl);
vint8m8_t __riscv_vrgather_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1,
size_t vl);
vint8m8_t __riscv_vrgather_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t vs1,
size_t vl);
vint16mf4_t __riscv_vrgather_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vint16mf4_t __riscv_vrgather_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
size_t vs1, size_t vl);
vint16mf2_t __riscv_vrgather_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vint16mf2_t __riscv_vrgather_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
size_t vs1, size_t vl);
vint16m1_t __riscv_vrgather_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vrgather_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vs1,
size_t vl);
vint16m2_t __riscv_vrgather_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vint16m2_t __riscv_vrgather_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vs1,
size_t vl);
vint16m4_t __riscv_vrgather_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vint16m4_t __riscv_vrgather_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vs1,
size_t vl);
vint16m8_t __riscv_vrgather_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vint16m8_t __riscv_vrgather_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vs1,
size_t vl);
vint32mf2_t __riscv_vrgather_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
vuint32mf2_t vs1, size_t vl);
vint32mf2_t __riscv_vrgather_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
size_t vs1, size_t vl);
vint32m1_t __riscv_vrgather_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vrgather_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vs1,
size_t vl);
vint32m2_t __riscv_vrgather_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2,
vuint32m2_t vs1, size_t vl);
vint32m2_t __riscv_vrgather_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vs1,
size_t vl);
vint32m4_t __riscv_vrgather_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2,
vuint32m4_t vs1, size_t vl);
vint32m4_t __riscv_vrgather_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vs1,
size_t vl);
vint32m8_t __riscv_vrgather_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2,
vuint32m8_t vs1, size_t vl);
vint32m8_t __riscv_vrgather_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vs1,
size_t vl);
vint64m1_t __riscv_vrgather_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vrgather_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vs1,
size_t vl);
vint64m2_t __riscv_vrgather_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2,
vuint64m2_t vs1, size_t vl);
vint64m2_t __riscv_vrgather_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vs1,
size_t vl);
vint64m4_t __riscv_vrgather_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2,
vuint64m4_t vs1, size_t vl);
vint64m4_t __riscv_vrgather_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vs1,
size_t vl);
vint64m8_t __riscv_vrgather_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2,
vuint64m8_t vs1, size_t vl);
vint64m8_t __riscv_vrgather_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vs1,
size_t vl);
vint8mf8_t __riscv_vrgatherei16_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2,
vuint16mf4_t vs1, size_t vl);
vint8mf4_t __riscv_vrgatherei16_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2,
vuint16mf2_t vs1, size_t vl);
vint8mf2_t __riscv_vrgatherei16_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vint8m1_t __riscv_vrgatherei16_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2,
vuint16m2_t vs1, size_t vl);
vint8m2_t __riscv_vrgatherei16_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2,
vuint16m4_t vs1, size_t vl);
vint8m4_t __riscv_vrgatherei16_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2,
vuint16m8_t vs1, size_t vl);
vint16mf4_t __riscv_vrgatherei16_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vint16mf2_t __riscv_vrgatherei16_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vint16m1_t __riscv_vrgatherei16_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vint16m2_t __riscv_vrgatherei16_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vint16m4_t __riscv_vrgatherei16_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vint16m8_t __riscv_vrgatherei16_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vint32mf2_t __riscv_vrgatherei16_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
vuint16mf4_t vs1, size_t vl);
vint32m1_t __riscv_vrgatherei16_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2,
vuint16mf2_t vs1, size_t vl);
vint32m2_t __riscv_vrgatherei16_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2,
vuint16m1_t vs1, size_t vl);
vint32m4_t __riscv_vrgatherei16_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2,
vuint16m2_t vs1, size_t vl);
vint32m8_t __riscv_vrgatherei16_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2,
vuint16m4_t vs1, size_t vl);
vint64m1_t __riscv_vrgatherei16_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2,
vuint16mf4_t vs1, size_t vl);
vint64m2_t __riscv_vrgatherei16_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2,
vuint16mf2_t vs1, size_t vl);
vint64m4_t __riscv_vrgatherei16_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2,
vuint16m1_t vs1, size_t vl);
vint64m8_t __riscv_vrgatherei16_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2,
vuint16m2_t vs1, size_t vl);
vuint8mf8_t __riscv_vrgather_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
vuint8mf8_t vs1, size_t vl);
vuint8mf8_t __riscv_vrgather_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
size_t vs1, size_t vl);
vuint8mf4_t __riscv_vrgather_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
vuint8mf4_t vs1, size_t vl);
vuint8mf4_t __riscv_vrgather_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
size_t vs1, size_t vl);
vuint8mf2_t __riscv_vrgather_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
vuint8mf2_t vs1, size_t vl);
vuint8mf2_t __riscv_vrgather_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
size_t vs1, size_t vl);
vuint8m1_t __riscv_vrgather_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2,
vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vrgather_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vs1,
size_t vl);
vuint8m2_t __riscv_vrgather_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2,
vuint8m2_t vs1, size_t vl);
vuint8m2_t __riscv_vrgather_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vs1,
size_t vl);
vuint8m4_t __riscv_vrgather_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2,
vuint8m4_t vs1, size_t vl);
vuint8m4_t __riscv_vrgather_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vs1,
size_t vl);
vuint8m8_t __riscv_vrgather_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2,
vuint8m8_t vs1, size_t vl);
vuint8m8_t __riscv_vrgather_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vs1,
size_t vl);
vuint16mf4_t __riscv_vrgather_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vuint16mf4_t __riscv_vrgather_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
size_t vs1, size_t vl);
vuint16mf2_t __riscv_vrgather_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vuint16mf2_t __riscv_vrgather_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
size_t vs1, size_t vl);
vuint16m1_t __riscv_vrgather_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vrgather_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
size_t vs1, size_t vl);
vuint16m2_t __riscv_vrgather_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vuint16m2_t __riscv_vrgather_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
size_t vs1, size_t vl);
vuint16m4_t __riscv_vrgather_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vuint16m4_t __riscv_vrgather_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
size_t vs1, size_t vl);
vuint16m8_t __riscv_vrgather_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vuint16m8_t __riscv_vrgather_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
size_t vs1, size_t vl);
vuint32mf2_t __riscv_vrgather_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
vuint32mf2_t vs1, size_t vl);
vuint32mf2_t __riscv_vrgather_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
size_t vs1, size_t vl);
vuint32m1_t __riscv_vrgather_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vrgather_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
size_t vs1, size_t vl);
vuint32m2_t __riscv_vrgather_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
vuint32m2_t vs1, size_t vl);
vuint32m2_t __riscv_vrgather_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
size_t vs1, size_t vl);
vuint32m4_t __riscv_vrgather_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
vuint32m4_t vs1, size_t vl);
vuint32m4_t __riscv_vrgather_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
size_t vs1, size_t vl);
vuint32m8_t __riscv_vrgather_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
vuint32m8_t vs1, size_t vl);
vuint32m8_t __riscv_vrgather_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
size_t vs1, size_t vl);
vuint64m1_t __riscv_vrgather_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vrgather_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
size_t vs1, size_t vl);
vuint64m2_t __riscv_vrgather_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
vuint64m2_t vs1, size_t vl);
vuint64m2_t __riscv_vrgather_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
size_t vs1, size_t vl);
vuint64m4_t __riscv_vrgather_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
vuint64m4_t vs1, size_t vl);
vuint64m4_t __riscv_vrgather_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
size_t vs1, size_t vl);
vuint64m8_t __riscv_vrgather_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
vuint64m8_t vs1, size_t vl);
vuint64m8_t __riscv_vrgather_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
size_t vs1, size_t vl);
vuint8mf8_t __riscv_vrgatherei16_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
vuint16mf4_t vs1, size_t vl);
vuint8mf4_t __riscv_vrgatherei16_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
vuint16mf2_t vs1, size_t vl);
vuint8mf2_t __riscv_vrgatherei16_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint8m1_t __riscv_vrgatherei16_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2,
vuint16m2_t vs1, size_t vl);
vuint8m2_t __riscv_vrgatherei16_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2,
vuint16m4_t vs1, size_t vl);
vuint8m4_t __riscv_vrgatherei16_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2,
vuint16m8_t vs1, size_t vl);
vuint16mf4_t __riscv_vrgatherei16_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
vuint16mf4_t vs1, size_t vl);
vuint16mf2_t __riscv_vrgatherei16_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
vuint16mf2_t vs1, size_t vl);
vuint16m1_t __riscv_vrgatherei16_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
vuint16m1_t vs1, size_t vl);
vuint16m2_t __riscv_vrgatherei16_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
vuint16m2_t vs1, size_t vl);
vuint16m4_t __riscv_vrgatherei16_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
vuint16m4_t vs1, size_t vl);
vuint16m8_t __riscv_vrgatherei16_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
vuint16m8_t vs1, size_t vl);
vuint32mf2_t __riscv_vrgatherei16_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
vuint16mf4_t vs1, size_t vl);
vuint32m1_t __riscv_vrgatherei16_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
vuint16mf2_t vs1, size_t vl);
vuint32m2_t __riscv_vrgatherei16_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
vuint16m1_t vs1, size_t vl);
vuint32m4_t __riscv_vrgatherei16_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
vuint16m2_t vs1, size_t vl);
vuint32m8_t __riscv_vrgatherei16_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
vuint16m4_t vs1, size_t vl);
vuint64m1_t __riscv_vrgatherei16_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
vuint16mf4_t vs1, size_t vl);
vuint64m2_t __riscv_vrgatherei16_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
vuint16mf2_t vs1, size_t vl);
vuint64m4_t __riscv_vrgatherei16_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
vuint16m1_t vs1, size_t vl);
vuint64m8_t __riscv_vrgatherei16_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
vuint16m2_t vs1, size_t vl);
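
// Masked usage sketch (illustrative): the _m forms take the mask first
// and compute only the active lanes. In these plain (non-policy) forms
// the masked-off result lanes should be treated as unspecified, so this
// example only consumes active lanes, writing them back with a masked
// store. The helper name and in-range predicate are this example's own.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void gather_in_range_f32(float *x, const uint32_t *idx, size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n); // first strip only, for brevity
  vfloat32m1_t v = __riscv_vle32_v_f32m1(x, vl);
  vuint32m1_t vi = __riscv_vle32_v_u32m1(idx, vl);
  // Active lanes: those whose index falls inside the current strip.
  vbool32_t vm = __riscv_vmsltu_vx_u32m1_b32(vi, (uint32_t)vl, vl);
  vfloat32m1_t g = __riscv_vrgather_vv_f32m1_m(vm, v, vi, vl);
  // Only lanes selected by vm are stored; x is untouched elsewhere.
  __riscv_vse32_v_f32m1_m(vm, x, g, vl);
}
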
vfloat16mf4_t __riscv_vcompress_vm_f16mf4(vfloat16mf4_t vs2, vbool64_t vs1,
size_t vl);
vfloat16mf2_t __riscv_vcompress_vm_f16mf2(vfloat16mf2_t vs2, vbool32_t vs1,
size_t vl);
vfloat16m1_t __riscv_vcompress_vm_f16m1(vfloat16m1_t vs2, vbool16_t vs1,
size_t vl);
vfloat16m2_t __riscv_vcompress_vm_f16m2(vfloat16m2_t vs2, vbool8_t vs1,
size_t vl);
vfloat16m4_t __riscv_vcompress_vm_f16m4(vfloat16m4_t vs2, vbool4_t vs1,
size_t vl);
vfloat16m8_t __riscv_vcompress_vm_f16m8(vfloat16m8_t vs2, vbool2_t vs1,
size_t vl);
vfloat32mf2_t __riscv_vcompress_vm_f32mf2(vfloat32mf2_t vs2, vbool64_t vs1,
size_t vl);
vfloat32m1_t __riscv_vcompress_vm_f32m1(vfloat32m1_t vs2, vbool32_t vs1,
size_t vl);
vfloat32m2_t __riscv_vcompress_vm_f32m2(vfloat32m2_t vs2, vbool16_t vs1,
size_t vl);
vfloat32m4_t __riscv_vcompress_vm_f32m4(vfloat32m4_t vs2, vbool8_t vs1,
size_t vl);
vfloat32m8_t __riscv_vcompress_vm_f32m8(vfloat32m8_t vs2, vbool4_t vs1,
size_t vl);
vfloat64m1_t __riscv_vcompress_vm_f64m1(vfloat64m1_t vs2, vbool64_t vs1,
size_t vl);
vfloat64m2_t __riscv_vcompress_vm_f64m2(vfloat64m2_t vs2, vbool32_t vs1,
size_t vl);
vfloat64m4_t __riscv_vcompress_vm_f64m4(vfloat64m4_t vs2, vbool16_t vs1,
size_t vl);
vfloat64m8_t __riscv_vcompress_vm_f64m8(vfloat64m8_t vs2, vbool8_t vs1,
size_t vl);
vint8mf8_t __riscv_vcompress_vm_i8mf8(vint8mf8_t vs2, vbool64_t vs1, size_t vl);
vint8mf4_t __riscv_vcompress_vm_i8mf4(vint8mf4_t vs2, vbool32_t vs1, size_t vl);
vint8mf2_t __riscv_vcompress_vm_i8mf2(vint8mf2_t vs2, vbool16_t vs1, size_t vl);
vint8m1_t __riscv_vcompress_vm_i8m1(vint8m1_t vs2, vbool8_t vs1, size_t vl);
vint8m2_t __riscv_vcompress_vm_i8m2(vint8m2_t vs2, vbool4_t vs1, size_t vl);
vint8m4_t __riscv_vcompress_vm_i8m4(vint8m4_t vs2, vbool2_t vs1, size_t vl);
vint8m8_t __riscv_vcompress_vm_i8m8(vint8m8_t vs2, vbool1_t vs1, size_t vl);
vint16mf4_t __riscv_vcompress_vm_i16mf4(vint16mf4_t vs2, vbool64_t vs1,
size_t vl);
vint16mf2_t __riscv_vcompress_vm_i16mf2(vint16mf2_t vs2, vbool32_t vs1,
size_t vl);
vint16m1_t __riscv_vcompress_vm_i16m1(vint16m1_t vs2, vbool16_t vs1, size_t vl);
vint16m2_t __riscv_vcompress_vm_i16m2(vint16m2_t vs2, vbool8_t vs1, size_t vl);
vint16m4_t __riscv_vcompress_vm_i16m4(vint16m4_t vs2, vbool4_t vs1, size_t vl);
vint16m8_t __riscv_vcompress_vm_i16m8(vint16m8_t vs2, vbool2_t vs1, size_t vl);
vint32mf2_t __riscv_vcompress_vm_i32mf2(vint32mf2_t vs2, vbool64_t vs1,
size_t vl);
vint32m1_t __riscv_vcompress_vm_i32m1(vint32m1_t vs2, vbool32_t vs1, size_t vl);
vint32m2_t __riscv_vcompress_vm_i32m2(vint32m2_t vs2, vbool16_t vs1, size_t vl);
vint32m4_t __riscv_vcompress_vm_i32m4(vint32m4_t vs2, vbool8_t vs1, size_t vl);
vint32m8_t __riscv_vcompress_vm_i32m8(vint32m8_t vs2, vbool4_t vs1, size_t vl);
vint64m1_t __riscv_vcompress_vm_i64m1(vint64m1_t vs2, vbool64_t vs1, size_t vl);
vint64m2_t __riscv_vcompress_vm_i64m2(vint64m2_t vs2, vbool32_t vs1, size_t vl);
vint64m4_t __riscv_vcompress_vm_i64m4(vint64m4_t vs2, vbool16_t vs1, size_t vl);
vint64m8_t __riscv_vcompress_vm_i64m8(vint64m8_t vs2, vbool8_t vs1, size_t vl);
vuint8mf8_t __riscv_vcompress_vm_u8mf8(vuint8mf8_t vs2, vbool64_t vs1,
size_t vl);
vuint8mf4_t __riscv_vcompress_vm_u8mf4(vuint8mf4_t vs2, vbool32_t vs1,
size_t vl);
vuint8mf2_t __riscv_vcompress_vm_u8mf2(vuint8mf2_t vs2, vbool16_t vs1,
size_t vl);
vuint8m1_t __riscv_vcompress_vm_u8m1(vuint8m1_t vs2, vbool8_t vs1, size_t vl);
vuint8m2_t __riscv_vcompress_vm_u8m2(vuint8m2_t vs2, vbool4_t vs1, size_t vl);
vuint8m4_t __riscv_vcompress_vm_u8m4(vuint8m4_t vs2, vbool2_t vs1, size_t vl);
vuint8m8_t __riscv_vcompress_vm_u8m8(vuint8m8_t vs2, vbool1_t vs1, size_t vl);
vuint16mf4_t __riscv_vcompress_vm_u16mf4(vuint16mf4_t vs2, vbool64_t vs1,
size_t vl);
vuint16mf2_t __riscv_vcompress_vm_u16mf2(vuint16mf2_t vs2, vbool32_t vs1,
size_t vl);
vuint16m1_t __riscv_vcompress_vm_u16m1(vuint16m1_t vs2, vbool16_t vs1,
size_t vl);
vuint16m2_t __riscv_vcompress_vm_u16m2(vuint16m2_t vs2, vbool8_t vs1,
size_t vl);
vuint16m4_t __riscv_vcompress_vm_u16m4(vuint16m4_t vs2, vbool4_t vs1,
size_t vl);
vuint16m8_t __riscv_vcompress_vm_u16m8(vuint16m8_t vs2, vbool2_t vs1,
size_t vl);
vuint32mf2_t __riscv_vcompress_vm_u32mf2(vuint32mf2_t vs2, vbool64_t vs1,
size_t vl);
vuint32m1_t __riscv_vcompress_vm_u32m1(vuint32m1_t vs2, vbool32_t vs1,
size_t vl);
vuint32m2_t __riscv_vcompress_vm_u32m2(vuint32m2_t vs2, vbool16_t vs1,
size_t vl);
vuint32m4_t __riscv_vcompress_vm_u32m4(vuint32m4_t vs2, vbool8_t vs1,
size_t vl);
vuint32m8_t __riscv_vcompress_vm_u32m8(vuint32m8_t vs2, vbool4_t vs1,
size_t vl);
vuint64m1_t __riscv_vcompress_vm_u64m1(vuint64m1_t vs2, vbool64_t vs1,
size_t vl);
vuint64m2_t __riscv_vcompress_vm_u64m2(vuint64m2_t vs2, vbool32_t vs1,
size_t vl);
vuint64m4_t __riscv_vcompress_vm_u64m4(vuint64m4_t vs2, vbool16_t vs1,
size_t vl);
vuint64m8_t __riscv_vcompress_vm_u64m8(vuint64m8_t vs2, vbool8_t vs1,
size_t vl);
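
// Usage sketch (illustrative): vcompress_vm packs the elements of vs2
// selected by the vs1 mask into the low positions of the result, in
// order. Note the operand order in this API: the data vector comes
// first and the mask second, and the operation itself is unconditional
// (no _m variants are listed above). The compaction helper below is
// this example's own; it assumes <riscv_vector.h> and a V toolchain.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static size_t keep_nonnegative_i32(const int32_t *src, int32_t *dst,
                                   size_t n) {
  size_t kept = 0;
  for (size_t avl = n; avl > 0;) {
    size_t vl = __riscv_vsetvl_e32m1(avl);
    vint32m1_t v = __riscv_vle32_v_i32m1(src + (n - avl), vl);
    vbool32_t keep = __riscv_vmsge_vx_i32m1_b32(v, 0, vl);
    vint32m1_t packed = __riscv_vcompress_vm_i32m1(v, keep, vl);
    size_t cnt = __riscv_vcpop_m_b32(keep, vl); // number of kept lanes
    __riscv_vse32_v_i32m1(dst + kept, packed, cnt); // store packed prefix
    kept += cnt;
    avl -= vl;
  }
  return kept;
}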