[PATCH 02/16] target/alpha: Use MO_ALIGN where required
From: Richard Henderson
Subject: [PATCH 02/16] target/alpha: Use MO_ALIGN where required
Date: Tue, 2 May 2023 17:08:32 +0100
Mark with MO_ALIGN all memory operations that are not already marked with UNALIGN().
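For context on the MemOp flags being OR'd together below: MO_LESL and MO_LEUQ select a little-endian signed 32-bit and unsigned 64-bit access respectively, and MO_ALIGN asks TCG to raise the target's alignment fault unless the guest address is naturally aligned, which is what Alpha architecturally requires for these instructions. The standalone C sketch below models that composition; it is a simplified illustration, not QEMU's implementation, and the flag values and the mem_access() helper are invented for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of TCG's MemOp flags; values are illustrative only. */
typedef enum {
    MO_32    = 2,        /* log2 of the access size: 4-byte longword */
    MO_64    = 3,        /* 8-byte quadword */
    MO_SIZE  = 3,        /* mask covering the size bits */
    MO_ALIGN = 1 << 4,   /* require natural alignment, else fault */
} MemOp;

/* Hypothetical helper: validate a guest address against the MemOp.
 * Returns 0 on success, -1 to model an alignment fault. */
static int mem_access(uint64_t addr, MemOp op)
{
    unsigned size = 1u << (op & MO_SIZE);
    if ((op & MO_ALIGN) && (addr & (size - 1))) {
        fprintf(stderr, "alignment fault: %u-byte access at 0x%" PRIx64 "\n",
                size, addr);
        return -1;
    }
    return 0;
}

int main(void)
{
    mem_access(0x1000, MO_32 | MO_ALIGN);  /* 4-byte aligned: ok */
    mem_access(0x1002, MO_32 | MO_ALIGN);  /* misaligned: faults */
    return 0;
}

The hunks below OR MO_ALIGN into exactly those loads, stores, and store-conditionals that do not already go through the UNALIGN() path in target/alpha/translate.c.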
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index ffbac1c114..be8adb2526 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -2659,7 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
@@ -2667,17 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LESL);
+ MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEUQ);
+ MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEUQ);
+ ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
--
2.34.1