From: Richard Henderson
Subject: [PATCH v5 08/17] accel/tcg: Move byte_swap from MemTxAttrs to CPUTLBEntryFull
Date: Sun, 25 Sep 2022 10:51:15 +0000

We had previously placed this bit in MemTxAttrs because we had
no other way to communicate that information to tlb_set_page*.
The bit is not relevant to memory transactions, only to page table
entries, and with tlb_set_page_full we now have a way to pass the
bit in directly.
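
As an illustration only (not part of this patch), a target fault
handler would now request inverted endianness roughly as sketched
below; the helper name and its arguments are invented for the
example, and the real in-tree user is target/sparc/mmu_helper.c:

    /* Hypothetical sketch: install one page with inverted endianness. */
    static void set_byteswapped_page(CPUState *cs, target_ulong vaddr,
                                     hwaddr paddr, int prot, int mmu_idx)
    {
        CPUTLBEntryFull full = {
            .phys_addr = paddr,
            .attrs = MEMTXATTRS_UNSPECIFIED,
            .prot = prot,
            .lg_page_size = TARGET_PAGE_BITS,
            /* Previously this was full.attrs.byte_swap = true. */
            .byte_swap = true,
        };
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
    }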

Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h    | 6 +++---
 include/exec/cpu-defs.h   | 3 +++
 include/exec/memattrs.h   | 2 --
 accel/tcg/cputlb.c        | 8 ++++----
 target/sparc/mmu_helper.c | 2 +-
 5 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 491629b9ba..064aa5aee8 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -384,8 +384,8 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap.  */
-#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if TLB entry requires slow path handling.  */
+#define TLB_SLOW_PATH       (1 << (TARGET_PAGE_BITS_MIN - 5))
 /* Set if TLB entry writes ignored.  */
 #define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
@@ -394,7 +394,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
  */
 #define TLB_FLAGS_MASK \
     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-    | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+    | TLB_WATCHPOINT | TLB_SLOW_PATH | TLB_DISCARD_WRITE)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 67239b4e5e..7c0ba93826 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -164,6 +164,9 @@ typedef struct CPUTLBEntryFull {
     /* @lg_page_size contains the log2 of the page size. */
     uint8_t lg_page_size;
 
+    /* @byte_swap indicates that all accesses use inverted endianness. */
+    bool byte_swap;
+
     /*
      * Allow target-specific additions to this structure.
      * This may be used to cache items from the guest cpu
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index 9fb98bc1ef..570e73c06f 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -45,8 +45,6 @@ typedef struct MemTxAttrs {
     unsigned int memory:1;
     /* Requester ID (for MSI for example) */
     unsigned int requester_id:16;
-    /* Invert endianness for this page */
-    unsigned int byte_swap:1;
     /*
      * The following are target-specific page-table bits.  These are not
      * related to actual memory transactions at all.  However, this structure
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 382c5d3109..1a5a6bd98b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1146,8 +1146,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         /* Repeat the MMU check and TLB fill on every access.  */
         address |= TLB_INVALID_MASK;
     }
-    if (full->attrs.byte_swap) {
-        address |= TLB_BSWAP;
+    if (full->byte_swap) {
+        address |= TLB_SLOW_PATH;
     }
 
     is_ram = memory_region_is_ram(section->mr);
@@ -1961,7 +1961,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                                  full->attrs, BP_MEM_READ, retaddr);
         }
 
-        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+        need_swap = size > 1 && full->byte_swap;
 
         /* Handle I/O access.  */
         if (likely(tlb_addr & TLB_MMIO)) {
@@ -2366,7 +2366,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                                  full->attrs, BP_MEM_WRITE, retaddr);
         }
 
-        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+        need_swap = size > 1 && full->byte_swap;
 
         /* Handle I/O access.  */
         if (tlb_addr & TLB_MMIO) {
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 08f656cbb6..a857bd9569 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -577,7 +577,7 @@ static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
             int do_fault = 0;
 
             if (TTE_IS_IE(env->dtlb[i].tte)) {
-                full->attrs.byte_swap = true;
+                full->byte_swap = true;
             }
 
             /* access ok? */
-- 
2.34.1