From: Richard Henderson
Subject: [PATCH v5 09/17] accel/tcg: Add force_aligned to CPUTLBEntryFull
Date: Sun, 25 Sep 2022 10:51:16 +0000

Support per-page natural alignment checking.  This will be
used by Arm for pages mapped with memory type Device.

Cc: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h |  3 +++
 accel/tcg/cputlb.c      | 20 +++++++++++++-------
 2 files changed, 16 insertions(+), 7 deletions(-)
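
For context, here is a minimal sketch of how a target's tlb_fill hook might
request this behaviour when mapping a Device page.  Only the force_aligned
field comes from this patch; the surrounding fill function, the
memattrs_is_device() predicate, and the exact set of other CPUTLBEntryFull
fields initialized below are illustrative assumptions, not part of the series.

#include "qemu/osdep.h"
#include "exec/exec-all.h"      /* tlb_set_page_full(), CPUTLBEntryFull */

static void example_fill_device_page(CPUState *cs, target_ulong vaddr,
                                     hwaddr paddr, int prot,
                                     MemTxAttrs attrs, int mmu_idx)
{
    CPUTLBEntryFull full = {
        /* Field names other than force_aligned are assumed here. */
        .phys_addr    = paddr,
        .attrs        = attrs,
        .prot         = prot,
        .lg_page_size = TARGET_PAGE_BITS,
    };

    /* Hypothetical predicate: "is this page Device memory?" */
    if (memattrs_is_device(attrs)) {
        /*
         * Request per-page alignment checking.  tlb_set_page_full()
         * then marks the entry TLB_SLOW_PATH, and load_helper() /
         * store_helper() raise cpu_unaligned_access() for any
         * misaligned access instead of splitting it into pieces.
         */
        full.force_aligned = true;
    }

    tlb_set_page_full(cs, mmu_idx, vaddr, &full);
}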

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 7c0ba93826..d0acbb4d35 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -167,6 +167,9 @@ typedef struct CPUTLBEntryFull {
     /* @byte_swap indicates that all accesses use inverted endianness. */
     bool byte_swap;
 
+    /* @force_aligned indicates that all accesses must be aligned. */
+    bool force_aligned;
+
     /*
      * Allow target-specific additions to this structure.
      * This may be used to cache items from the guest cpu
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 1a5a6bd98b..01a89b4a1f 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1146,7 +1146,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         /* Repeat the MMU check and TLB fill on every access.  */
         address |= TLB_INVALID_MASK;
     }
-    if (full->byte_swap) {
+    if (full->byte_swap || full->force_aligned) {
         address |= TLB_SLOW_PATH;
     }
 
@@ -1944,16 +1944,19 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
 
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUTLBEntryFull *full;
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
         bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load.  */
         if ((addr & (size - 1)) != 0) {
+            /* Honor per-page alignment requirements. */
+            if (full->force_aligned) {
+                cpu_unaligned_access(env_cpu(env), addr, access_type,
+                                     mmu_idx, retaddr);
+            }
             goto do_unaligned_access;
         }
 
-        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
@@ -2349,16 +2352,19 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUTLBEntryFull *full;
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
         bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores.  */
         if ((addr & (size - 1)) != 0) {
+            /* Honor per-page alignment requirements. */
+            if (full->force_aligned) {
+                cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                                     mmu_idx, retaddr);
+            }
             goto do_unaligned_access;
         }
 
-        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
-- 
2.34.1



