[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH qemu v6 09/10] target/riscv: rvv: Add mask agnostic for vector permutation instructions
From: |
Alistair Francis |
Subject: |
Re: [PATCH qemu v6 09/10] target/riscv: rvv: Add mask agnostic for vector permutation instructions |
Date: |
Thu, 21 Jul 2022 10:23:50 +1000 |
On Mon, Jun 20, 2022 at 4:59 PM ~eopxd <eopxd@git.sr.ht> wrote:
>
> From: Yueh-Ting (eop) Chen <eop.chen@sifive.com>
>
> Signed-off-by: eop Chen <eop.chen@sifive.com>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/insn_trans/trans_rvv.c.inc | 1 +
> target/riscv/vector_helper.c | 26 +++++++++++++++++++++++--
> 2 files changed, 25 insertions(+), 2 deletions(-)
>
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc
> b/target/riscv/insn_trans/trans_rvv.c.inc
> index c1bd29329e..e58208f363 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3891,6 +3891,7 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a,
> uint8_t seq)
> data = FIELD_DP32(data, VDATA, VM, a->vm);
> data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
> data = FIELD_DP32(data, VDATA, VTA, s->vta);
> + data = FIELD_DP32(data, VDATA, VMA, s->vma);
>
> tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
> vreg_ofs(s, a->rs2), cpu_env,
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index 52518648bb..d224861c2c 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -5018,11 +5018,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong
> s1, void *vs2, \
> uint32_t esz = sizeof(ETYPE); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> target_ulong offset = s1, i_min, i; \
> \
> i_min = MAX(env->vstart, offset); \
> for (i = i_min; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
> @@ -5047,13 +5050,17 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong
> s1, void *vs2, \
> uint32_t esz = sizeof(ETYPE); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> target_ulong i_max, i; \
> \
> i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
> for (i = env->vstart; i < i_max; ++i) { \
> - if (vm || vext_elem_mask(v0, i)) { \
> - *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
> + if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + continue; \
> } \
> + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
> } \
> \
> for (i = i_max; i < vl; ++i) { \
> @@ -5083,10 +5090,13 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0,
> target_ulong s1, \
> uint32_t esz = sizeof(ETYPE); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> if (i == 0) { \
> @@ -5128,10 +5138,13 @@ static void vslide1down_##BITWIDTH(void *vd, void
> *v0, target_ulong s1, \
> uint32_t esz = sizeof(ETYPE);
> \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz);
> \
> uint32_t vta = vext_vta(desc);
> \
> + uint32_t vma = vext_vma(desc);
> \
> uint32_t i;
> \
>
> \
> for (i = env->vstart; i < vl; i++) {
> \
> if (!vm && !vext_elem_mask(v0, i)) {
> \
> + /* set masked-off elements to 1s */
> \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
> \
> continue;
> \
> }
> \
> if (i == vl - 1) {
> \
> @@ -5199,11 +5212,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void
> *vs2, \
> uint32_t esz = sizeof(TS2); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> uint64_t index; \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> index = *((TS1 *)vs1 + HS1(i)); \
> @@ -5239,11 +5255,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong
> s1, void *vs2, \
> uint32_t esz = sizeof(ETYPE); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> uint64_t index = s1; \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> if (index >= vlmax) { \
> @@ -5318,10 +5337,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,
> \
> uint32_t esz = sizeof(ETYPE); \
> uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> uint32_t vta = vext_vta(desc); \
> + uint32_t vma = vext_vma(desc); \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> + /* set masked-off elements to 1s */ \
> + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
> --
> 2.34.2
>
>
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [PATCH qemu v6 09/10] target/riscv: rvv: Add mask agnostic for vector permutation instructions,
Alistair Francis <=