[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 27/37] target/i386: Use tcg gvec ops for pmovmskb
From: |
Paolo Bonzini |
Subject: |
[PATCH 27/37] target/i386: Use tcg gvec ops for pmovmskb |
Date: |
Mon, 12 Sep 2022 01:04:07 +0200 |
From: Richard Henderson <richard.henderson@linaro.org>
As pmovmskb is used by strlen et al, this is the third
highest overhead sse operation at 0.8%.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
[Reorganize to generate code for any vector size. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/emit.c.inc | 65 +++++++++++++++++++++++++++++++++++---
1 file changed, 60 insertions(+), 5 deletions(-)
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index dbf2c05e16..52c0a7fbe0 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1179,14 +1179,69 @@ static void gen_PINSR(DisasContext *s, CPUX86State
*env, X86DecodedInsn *decode)
gen_pinsr(s, env, decode, decode->op[2].ot);
}
+static void gen_pmovmskb_i64(TCGv_i64 d, TCGv_i64 s)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(d, s, 0x8080808080808080ull);
+
+ /*
+ * After each shift+or pair:
+ * 0: a.......b.......c.......d.......e.......f.......g.......h.......
+ * 7: ab......bc......cd......de......ef......fg......gh......h.......
+ * 14: abcd....bcde....cdef....defg....efgh....fgh.....gh......h.......
+ * 28: abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h.......
+ * The result is left in the high bits of the word.
+ */
+ tcg_gen_shli_i64(t, d, 7);
+ tcg_gen_or_i64(d, d, t);
+ tcg_gen_shli_i64(t, d, 14);
+ tcg_gen_or_i64(d, d, t);
+ tcg_gen_shli_i64(t, d, 28);
+ tcg_gen_or_i64(d, d, t);
+}
+
+static void gen_pmovmskb_vec(unsigned vece, TCGv_vec d, TCGv_vec s)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_constant_vec_matching(d, MO_8, 0x80);
+
+ /* See above */
+ tcg_gen_and_vec(vece, d, s, m);
+ tcg_gen_shli_vec(vece, t, d, 7);
+ tcg_gen_or_vec(vece, d, d, t);
+ tcg_gen_shli_vec(vece, t, d, 14);
+ tcg_gen_or_vec(vece, d, d, t);
+ if (vece == MO_64) {
+ tcg_gen_shli_vec(vece, t, d, 28);
+ tcg_gen_or_vec(vece, d, d, t);
+ }
+}
+
static void gen_PMOVMSKB(DisasContext *s, CPUX86State *env, X86DecodedInsn
*decode)
{
- if (s->prefix & PREFIX_DATA) {
- gen_helper_pmovmskb_xmm(s->tmp2_i32, cpu_env, s->ptr2);
- } else {
- gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr2);
+ static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
+ static const GVecGen2 g = {
+ .fni8 = gen_pmovmskb_i64,
+ .fniv = gen_pmovmskb_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64
+ };
+ MemOp ot = decode->op[0].ot;
+ int vec_len = sse_vec_len(s, decode);
+ TCGv t = tcg_temp_new();
+
+ tcg_gen_gvec_2(offsetof(CPUX86State, xmm_t0) + xmm_offset(ot),
decode->op[2].offset,
+ vec_len, vec_len, &g);
+ tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len
- 1)));
+ while (vec_len > 8) {
+ vec_len -= 8;
+ tcg_gen_shli_tl(s->T0, s->T0, 8);
+ tcg_gen_ld8u_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len
- 1)));
+ tcg_gen_or_tl(s->T0, s->T0, t);
}
- tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ tcg_temp_free(t);
}
static void gen_POR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
--
2.37.2
- Re: [PATCH 20/37] target/i386: reimplement 0x0f 0x60-0x6f, add AVX, (continued)
Re: [PATCH 20/37] target/i386: reimplement 0x0f 0x60-0x6f, add AVX, Richard Henderson, 2022/09/12
[PATCH 23/37] target/i386: reimplement 0x0f 0x78-0x7f, add AVX, Paolo Bonzini, 2022/09/11
[PATCH 25/37] target/i386: reimplement 0x0f 0xd0-0xd7, 0xe0-0xe7, 0xf0-0xf7, add AVX, Paolo Bonzini, 2022/09/11
[PATCH 24/37] target/i386: reimplement 0x0f 0x70-0x77, add AVX, Paolo Bonzini, 2022/09/11
[PATCH 27/37] target/i386: Use tcg gvec ops for pmovmskb,
Paolo Bonzini <=
[PATCH 26/37] target/i386: reimplement 0x0f 0x3a, add AVX, Paolo Bonzini, 2022/09/11
[PATCH 28/37] target/i386: reimplement 0x0f 0x38, add AVX, Paolo Bonzini, 2022/09/11
[PATCH 29/37] target/i386: reimplement 0x0f 0xc2, 0xc4-0xc6, add AVX, Paolo Bonzini, 2022/09/11