#include <riscv_vector.h>
#include <stdint.h>

// Unmasked variants: vrgather.vv gathers vs2[vs1[i]] per element, while
// vrgather.vx gathers the single element vs2[vs1] into every active lane.
// Each intrinsic is covered at every LMUL from mf4 through m8.

vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t vs2, vuint16mf4_t vs1,
                                        size_t vl) {
  return __riscv_vrgather_vv_bf16mf4(vs2, vs1, vl);
}

vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t vs2, size_t vs1,
                                        size_t vl) {
  return __riscv_vrgather_vx_bf16mf4(vs2, vs1, vl);
}

vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t vs2, vuint16mf2_t vs1,
                                        size_t vl) {
  return __riscv_vrgather_vv_bf16mf2(vs2, vs1, vl);
}

vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t vs2, size_t vs1,
                                        size_t vl) {
  return __riscv_vrgather_vx_bf16mf2(vs2, vs1, vl);
}

vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t vs2, vuint16m1_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vv_bf16m1(vs2, vs1, vl);
}

vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t vs2, size_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vx_bf16m1(vs2, vs1, vl);
}

vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t vs2, vuint16m2_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vv_bf16m2(vs2, vs1, vl);
}

vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t vs2, size_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vx_bf16m2(vs2, vs1, vl);
}

vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t vs2, vuint16m4_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vv_bf16m4(vs2, vs1, vl);
}

vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t vs2, size_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vx_bf16m4(vs2, vs1, vl);
}

vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t vs2, vuint16m8_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vv_bf16m8(vs2, vs1, vl);
}

vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t vs2, size_t vs1,
                                      size_t vl) {
  return __riscv_vrgather_vx_bf16m8(vs2, vs1, vl);
}

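// A minimal usage sketch, not part of the test surface above: vrgather.vv
// with the index vector vl-1, vl-2, ..., 0 reverses the first vl elements,
// and vrgather.vx with a scalar index acts as a "broadcast lane" primitive.
// The helper names and the uint16_t narrowing of vl - 1 are illustrative
// assumptions (the narrowing holds whenever vl fits in 16 bits); indices at
// or beyond VLMAX read as zero per the V specification.
static inline vbfloat16m1_t reverse_bf16m1(vbfloat16m1_t v, size_t vl) {
  vuint16m1_t i = __riscv_vid_v_u16m1(vl); // 0, 1, ..., vl-1
  vuint16m1_t rev =
      __riscv_vrsub_vx_u16m1(i, (uint16_t)(vl - 1), vl); // (vl-1) - i
  return __riscv_vrgather_vv_bf16m1(v, rev, vl);
}

static inline vbfloat16m1_t splat_lane_bf16m1(vbfloat16m1_t v, size_t i,
                                              size_t vl) {
  return __riscv_vrgather_vx_bf16m1(v, i, vl); // every lane becomes v[i]
}
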
// Masked variants: vm selects the active lanes; inactive lanes follow the
// default mask policy of the non-policy `_m` intrinsics.

vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
                                          vuint16mf4_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16mf4_m(vm, vs2, vs1, vl);
}

vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
                                          size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16mf4_m(vm, vs2, vs1, vl);
}

vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
                                          vuint16mf2_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16mf2_m(vm, vs2, vs1, vl);
}

vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
                                          size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16mf2_m(vm, vs2, vs1, vl);
}

vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
                                        vuint16m1_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16m1_m(vm, vs2, vs1, vl);
}

vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
                                        size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16m1_m(vm, vs2, vs1, vl);
}

vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
                                        vuint16m2_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16m2_m(vm, vs2, vs1, vl);
}

vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
                                        size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16m2_m(vm, vs2, vs1, vl);
}

vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
                                        vuint16m4_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16m4_m(vm, vs2, vs1, vl);
}

vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
                                        size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16m4_m(vm, vs2, vs1, vl);
}

vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
                                        vuint16m8_t vs1, size_t vl) {
  return __riscv_vrgather_vv_bf16m8_m(vm, vs2, vs1, vl);
}

vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
                                        size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_bf16m8_m(vm, vs2, vs1, vl);
}
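
// A minimal sketch of the masked form, again illustrative rather than part
// of the tests: gather through idx on even lanes only, building the mask
// with vid/vand/vmseq. The helper name is an assumption; inactive lanes are
// handled by the default mask policy of the non-policy `_m` intrinsic.
static inline vbfloat16m1_t gather_even_lanes_bf16m1(vbfloat16m1_t v,
                                                     vuint16m1_t idx,
                                                     size_t vl) {
  vuint16m1_t lane = __riscv_vid_v_u16m1(vl); // 0, 1, ..., vl-1
  vbool16_t even = __riscv_vmseq_vx_u16m1_b16(
      __riscv_vand_vx_u16m1(lane, 1, vl), 0, vl); // lane % 2 == 0
  return __riscv_vrgather_vv_bf16m1_m(even, v, idx, vl);
}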