Subject: [Qemu-devel] [RFC v4 8/9] tcg-aarch64: Implement excl variants of qemu_{ld, st}
From: Alvise Rigo
Date: Fri, 7 Aug 2015 19:03:14 +0200
Implement the exclusive variants of qemu_{ld,st}_{i32,i64} for
tcg-aarch64.
The lookup of the proper memory helper has been rewritten to take
the new exclusive (LoadLink/StoreConditional) helpers into account.
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
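A note on the helper dispatch (not part of the diff below): the new
ld_helper()/st_helper() routines simply fold the MO_EXCL flag into the
existing table lookup. The standalone sketch below shows the idea. The
constant values here are assumptions that mirror the shape of the
TCGMemOp flags this series builds on (MO_EXCL in particular is defined
earlier in the series, and its exact value may differ), and the helpers
are printf stubs rather than the real MMU helpers:

#include <assert.h>
#include <stdio.h>

enum {
    MO_SIZE  = 3,    /* access size: 8 << (opc & MO_SIZE) bits        */
    MO_SIGN  = 4,    /* sign-extend the loaded value                  */
    MO_BSWAP = 8,    /* byte-swap relative to host endianness         */
    MO_EXCL  = 16,   /* exclusive (LoadLink/StoreConditional) access  */
};

static void ld_mmu(void)     { puts("helper_*_ld*_mmu"); }
static void ldlink_mmu(void) { puts("helper_*_ldlink*_mmu"); }

/* One entry per (MO_BSWAP | MO_SIZE) combination, as in the patch.
 * The [0 ... 15] range designator is a GNU extension, as used in QEMU. */
static void (* const qemu_ld_helpers[16])(void)   = { [0 ... 15] = ld_mmu };
static void (* const qemu_ldex_helpers[16])(void) = { [0 ... 15] = ldlink_mmu };

static void (*ld_helper(int opc))(void)
{
    if (opc & MO_EXCL) {
        /* There are no sign-extending exclusive variants. */
        assert(!(opc & MO_SIGN));
        return qemu_ldex_helpers[(opc - MO_EXCL) & (MO_BSWAP | MO_SIZE)];
    }
    return qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
}

int main(void)
{
    ld_helper(2)();            /* 32-bit load     -> normal table    */
    ld_helper(2 | MO_EXCL)();  /* 32-bit LoadLink -> exclusive table */
    return 0;
}

Masking with (MO_BSWAP | MO_SIZE) after clearing MO_EXCL keeps the
exclusive tables indexed exactly like the existing qemu_ld/st_helpers
ones, so non-exclusive accesses are dispatched as before.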
tcg/aarch64/tcg-target.c | 99 ++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 92 insertions(+), 7 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index fe44ad7..790ae90 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -971,6 +971,32 @@ static void * const qemu_ld_helpers[16] = {
[MO_BEQ] = helper_be_ldq_mmu,
};
+/* LoadLink helpers, only unsigned. Use the function below to access them. */
+static void * const qemu_ldex_helpers[16] = {
+ [MO_UB] = helper_ret_ldlinkub_mmu,
+
+ [MO_LEUW] = helper_le_ldlinkuw_mmu,
+ [MO_LEUL] = helper_le_ldlinkul_mmu,
+ [MO_LEQ] = helper_le_ldlinkq_mmu,
+
+ [MO_BEUW] = helper_be_ldlinkuw_mmu,
+ [MO_BEUL] = helper_be_ldlinkul_mmu,
+ [MO_BEQ] = helper_be_ldlinkq_mmu,
+};
+
+static inline tcg_insn_unit *ld_helper(TCGMemOp opc)
+{
+ if (opc & MO_EXCL) {
+ /* There are no sign-extending exclusive variants. */
+ assert(!(opc & MO_SIGN));
+
+ return qemu_ldex_helpers[((int)opc - MO_EXCL) & (MO_BSWAP | MO_SIZE)];
+ }
+
+ return qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
+}
+
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
* uintxx_t val, TCGMemOpIdx oi,
* uintptr_t ra)
@@ -985,6 +1011,26 @@ static void * const qemu_st_helpers[16] = {
[MO_BEQ] = helper_be_stq_mmu,
};
+/* StoreConditional helpers. Use the function below to access them. */
+static void * const qemu_stex_helpers[16] = {
+ [MO_UB] = helper_ret_stcondb_mmu,
+ [MO_LEUW] = helper_le_stcondw_mmu,
+ [MO_LEUL] = helper_le_stcondl_mmu,
+ [MO_LEQ] = helper_le_stcondq_mmu,
+ [MO_BEUW] = helper_be_stcondw_mmu,
+ [MO_BEUL] = helper_be_stcondl_mmu,
+ [MO_BEQ] = helper_be_stcondq_mmu,
+};
+
+static inline tcg_insn_unit *st_helper(TCGMemOp opc)
+{
+ if (opc & MO_EXCL) {
+ return qemu_stex_helpers[((int)opc - MO_EXCL) & (MO_BSWAP | MO_SIZE)];
+ }
+
+ return qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)];
+}
+
static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
{
ptrdiff_t offset = tcg_pcrel_diff(s, target);
@@ -1004,7 +1050,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
tcg_out_adr(s, TCG_REG_X3, lb->raddr);
- tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+ tcg_out_call(s, ld_helper(opc));
if (opc & MO_SIGN) {
tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
} else {
@@ -1027,17 +1073,23 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi);
tcg_out_adr(s, TCG_REG_X4, lb->raddr);
- tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+
+ tcg_out_call(s, st_helper(opc));
+ if (opc & MO_EXCL) {
+ tcg_out_mov(s, TCG_TYPE_I32, lb->llsc_success, TCG_REG_X0);
+ }
tcg_out_goto(s, lb->raddr);
}
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
TCGType ext, TCGReg data_reg, TCGReg addr_reg,
- tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
+ tcg_insn_unit *raddr, tcg_insn_unit *label_ptr,
+ TCGReg llsc_success)
{
TCGLabelQemuLdst *label = new_ldst_label(s);
label->is_ld = is_ld;
+ label->llsc_success = llsc_success;
label->oi = oi;
label->type = ext;
label->datalo_reg = data_reg;
@@ -1206,10 +1258,18 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOp s_bits = memop & MO_SIZE;
tcg_insn_unit *label_ptr;
- tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1);
+ if (memop & MO_EXCL) {
+ /* For a LoadLink access we don't consult the TLB; we always take
+ * the slow path. */
+ label_ptr = s->code_ptr;
+ tcg_out_goto_cond_noaddr(s, TCG_COND_ALWAYS);
+ } else {
+ tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1);
+ }
+
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
- s->code_ptr, label_ptr);
+ s->code_ptr, label_ptr, 0);
#else /* !CONFIG_SOFTMMU */
tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg,
GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
@@ -1225,16 +1285,32 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOp s_bits = memop & MO_SIZE;
tcg_insn_unit *label_ptr;
+
tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
add_qemu_ldst_label(s, false, oi, s_bits == MO_64, data_reg, addr_reg,
- s->code_ptr, label_ptr);
+ s->code_ptr, label_ptr, 0);
#else /* !CONFIG_SOFTMMU */
tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg,
GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
#endif /* CONFIG_SOFTMMU */
}
+static void tcg_out_qemu_stcond(TCGContext *s, TCGReg llsc_success,
+ TCGReg data_reg, TCGReg addr_reg,
+ TCGMemOpIdx oi)
+{
+ TCGMemOp memop = get_memop(oi);
+ TCGMemOp s_bits = memop & MO_SIZE;
+ tcg_insn_unit *label_ptr;
+
+ label_ptr = s->code_ptr;
+ tcg_out_goto_cond_noaddr(s, TCG_COND_ALWAYS);
+
+ add_qemu_ldst_label(s, false, oi, s_bits == MO_64, data_reg, addr_reg,
+ s->code_ptr, label_ptr, llsc_success);
+}
+
static tcg_insn_unit *tb_ret_addr;
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
@@ -1526,6 +1602,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, REG0(0), a1, a2);
break;
+ case INDEX_op_qemu_stcond_i32:
+ case INDEX_op_qemu_stcond_i64:
+ tcg_out_qemu_stcond(s, REG0(0), a1, a2, args[3]);
+ break;
case INDEX_op_bswap64_i64:
tcg_out_rev64(s, a0, a1);
@@ -1684,7 +1767,9 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_qemu_ld_i32, { "r", "l" } },
{ INDEX_op_qemu_ld_i64, { "r", "l" } },
{ INDEX_op_qemu_st_i32, { "lZ", "l" } },
+ { INDEX_op_qemu_stcond_i32, { "r", "lZ", "l" } },
{ INDEX_op_qemu_st_i64, { "lZ", "l" } },
+ { INDEX_op_qemu_stcond_i64, { "r", "lZ", "l" } },
{ INDEX_op_bswap16_i32, { "r", "r" } },
{ INDEX_op_bswap32_i32, { "r", "r" } },
--
2.5.0