[PULL 21/39] tcg/loongarch64: Implement 128-bit load & store
From: Richard Henderson
Subject: [PULL 21/39] tcg/loongarch64: Implement 128-bit load & store
Date: Fri, 15 Sep 2023 20:29:53 -0700
From: Jiajie Chen <c@jia.je>
If LSX is available, use LSX instructions to implement 128-bit load &
store when MO_128 is required; otherwise use two 64-bit loads & stores.
Signed-off-by: Jiajie Chen <c@jia.je>
Message-Id: <20230908022302.180442-17-c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
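For the guest-facing side, here is a minimal sketch of how a front end
might drive the new path through the generic 128-bit memory-op API in
tcg/tcg-op.h. The names addr and mem_idx and the exact MemOp combination
are illustrative assumptions, not taken from this patch:

    /* Sketch only: request a little-endian 16-byte load and split it. */
    TCGv_i128 val = tcg_temp_new_i128();
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();

    /*
     * MO_ATOM_WITHIN16 (an assumed choice here) requests single-copy
     * atomicity, which can resolve to the VLDX/VSTX path below.
     */
    tcg_gen_qemu_ld_i128(val, addr, mem_idx, MO_LE | MO_128 | MO_ATOM_WITHIN16);
    tcg_gen_extr_i128_i64(lo, hi, val);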
 tcg/loongarch64/tcg-target-con-set.h |  2 ++
 tcg/loongarch64/tcg-target.h         |  2 +-
 tcg/loongarch64/tcg-target.c.inc     | 63 ++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index 914572d21b..77d62e38e7 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -18,6 +18,7 @@ C_O0_I1(r)
 C_O0_I2(rZ, r)
 C_O0_I2(rZ, rZ)
 C_O0_I2(w, r)
+C_O0_I3(r, r, r)
 C_O1_I1(r, r)
 C_O1_I1(w, r)
 C_O1_I1(w, w)
@@ -37,3 +38,4 @@ C_O1_I2(w, w, wM)
 C_O1_I2(w, w, wA)
 C_O1_I3(w, w, w, w)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O2_I1(r, r, r)
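For readers new to the constraint-set notation: C_On_Im declares an opcode
shape with n register outputs and m register inputs, so the two new entries
map onto the 128-bit ops like this:

    /*
     * C_O2_I1(r, r, r) -- qemu_ld_i128: two GPR outputs (data_lo, data_hi),
     *                     one GPR input (the guest address).
     * C_O0_I3(r, r, r) -- qemu_st_i128: no outputs, three GPR inputs
     *                     (data_lo, data_hi, the guest address).
     */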
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index 67b0a95532..03017672f6 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -171,7 +171,7 @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
 
-#define TCG_TARGET_HAS_qemu_ldst_i128   0
+#define TCG_TARGET_HAS_qemu_ldst_i128   use_lsx_instructions
 
 #define TCG_TARGET_HAS_v64              0
 #define TCG_TARGET_HAS_v128             use_lsx_instructions
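With this change TCG_TARGET_HAS_qemu_ldst_i128 becomes a runtime property
rather than a compile-time constant. A rough sketch of how the flag can be
populated on a Linux host follows; the detection itself is not part of this
patch, probe_lsx is a hypothetical name, and HWCAP_LOONGARCH_LSX is bit 4
per the kernel's asm/hwcap.h:

    #include "qemu/osdep.h"              /* qemu_getauxval() */

    #ifndef HWCAP_LOONGARCH_LSX
    #define HWCAP_LOONGARCH_LSX (1 << 4)
    #endif

    bool use_lsx_instructions;

    static void probe_lsx(void)
    {
        /* AT_HWCAP advertises LSX support on LoongArch Linux hosts. */
        if (qemu_getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX) {
            use_lsx_instructions = true;
        }
    }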
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 82901d678a..44682101fc 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1081,6 +1081,52 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     }
 }
 
+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
+                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+    TCGLabelQemuLdst *ldst;
+    HostAddress h;
+
+    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
+
+    if (h.aa.atom == MO_128) {
+        /*
+         * Use VLDX/VSTX when 128-bit atomicity is required.
+         * If address is aligned to 16 bytes, the 128-bit load/store is atomic.
+         */
+        if (is_ld) {
+            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
+            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
+            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
+        } else {
+            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
+            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
+            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
+        }
+    } else {
+        /* Otherwise use a pair of LD/ST. */
+        TCGReg base = h.base;
+        if (h.index != TCG_REG_ZERO) {
+            base = TCG_REG_TMP0;
+            tcg_out_opc_add_d(s, base, h.base, h.index);
+        }
+        if (is_ld) {
+            tcg_out_opc_ld_d(s, data_lo, base, 0);
+            tcg_out_opc_ld_d(s, data_hi, base, 8);
+        } else {
+            tcg_out_opc_st_d(s, data_lo, base, 0);
+            tcg_out_opc_st_d(s, data_hi, base, 8);
+        }
+    }
+
+    if (ldst) {
+        ldst->type = TCG_TYPE_I128;
+        ldst->datalo_reg = data_lo;
+        ldst->datahi_reg = data_hi;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
+}
+
 /*
  * Entry-points
  */
@@ -1145,6 +1191,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     TCGArg a0 = args[0];
     TCGArg a1 = args[1];
     TCGArg a2 = args[2];
+    TCGArg a3 = args[3];
     int c2 = const_args[2];
 
     switch (opc) {
@@ -1507,6 +1554,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_qemu_ld_a64_i64:
         tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
+        break;
     case INDEX_op_qemu_st_a32_i32:
     case INDEX_op_qemu_st_a64_i32:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
@@ -1515,6 +1566,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_qemu_st_a64_i64:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
+        break;
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
@@ -1996,6 +2051,14 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_st_a64_i64:
         return C_O0_I2(rZ, r);
 
+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        return C_O2_I1(r, r, r);
+
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        return C_O0_I3(r, r, r);
+
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
         return C_O0_I2(rZ, rZ);
--
2.34.1
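A closing note on the data structures the new function consumes:
prepare_host_addr() resolves the guest address into a host base+index pair
and reports the atomicity the memory op requires. The shapes below are
assumptions sketched from how the diff uses them, not quotations of the
real definitions:

    /* Assumed shapes, for illustration only. */
    typedef struct {
        MemOp atom;      /* atomicity required: MO_128 selects VLDX/VSTX */
        MemOp align;     /* alignment required for the access */
    } TCGAtomAlign;

    typedef struct {
        TCGReg base;     /* host base register */
        TCGReg index;    /* host index register; TCG_REG_ZERO when absent */
        TCGAtomAlign aa;
    } HostAddress;

In the non-atomic fallback, the ADD.D into TCG_REG_TMP0 is needed because
LD.D/ST.D addressing takes only a base register plus a 12-bit immediate, so
base+index must be materialized before the two 64-bit accesses at offsets
0 and 8.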