From: Yi Liu
Subject: Re: [PATCH v4 09/17] intel_iommu: Flush stage-1 cache in iotlb invalidation
Date: Mon, 4 Nov 2024 15:36:14 +0800
User-agent: Mozilla Thunderbird

On 2024/11/4 11:38, Duan, Zhenzhong wrote:


-----Original Message-----
From: Liu, Yi L <yi.l.liu@intel.com>
Sent: Monday, November 4, 2024 10:51 AM
Subject: Re: [PATCH v4 09/17] intel_iommu: Flush stage-1 cache in iotlb
invalidation

On 2024/9/30 17:26, Zhenzhong Duan wrote:
According to spec, Page-Selective-within-Domain Invalidation (11b):

1. IOTLB entries caching second-stage (PGTT=010b) or pass-through
(PGTT=100b) mappings associated with the specified domain-id and the
input-address range are invalidated.
2. IOTLB entries caching first-stage (PGTT=001b) or nested (PGTT=011b)
mappings associated with the specified domain-id are invalidated.

So per the spec definition, Page-Selective-within-Domain Invalidation
needs to flush cached first-stage and nested IOTLB entries as well.

We don't support nested translation yet and pass-through mappings are never
cached, so the IOTLB cache only holds first-stage and second-stage mappings.
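
In other words, the per-entry matching rule for a page-selective invalidation
boils down to the following (an illustrative sketch only, simplified from the
vtd_hash_remove_by_page() change below; the helper name is made up and the
large-page gfn_tlb case is omitted):

    /* Sketch: should a cached entry be flushed by a
     * Page-Selective-within-Domain Invalidation? */
    static bool vtd_entry_hit_by_psi(VTDIOTLBEntry *entry, uint16_t domain_id,
                                     uint64_t gfn, uint64_t gfn_mask)
    {
        if (entry->domain_id != domain_id) {
            return false;   /* other domains are never touched */
        }
        if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) {
            return true;    /* first-stage: flushed domain-wide, address ignored */
        }
        /* second-stage: flushed only if the address range matches */
        return (entry->gfn & gfn_mask) == gfn;
    }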

a side question, how about cache paging structure?

We don't cache paging structures in the current vIOMMU emulation code.
I think the reason is that walking the paging structures is cheap for the
vIOMMU compared to real IOMMU hardware. Even if we cached paging structures,
we would still need to compare the address tag and read memory to get the
result, so there seems to be little benefit.

never mind.



Add a pgtt tag in VTDIOTLBEntry to record the PGTT type of the mapping and
invalidate entries based on their PGTT type.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Reviewed-by: Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
Acked-by: Jason Wang <jasowang@redhat.com>
---
   include/hw/i386/intel_iommu.h |  1 +
   hw/i386/intel_iommu.c         | 27 +++++++++++++++++++++------
   2 files changed, 22 insertions(+), 6 deletions(-)

anyhow, this patch looks good to me.

Reviewed-by: Yi Liu <yi.l.liu@intel.com>

diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index fe9057c50d..b843d069cc 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -155,6 +155,7 @@ struct VTDIOTLBEntry {
       uint64_t pte;
       uint64_t mask;
       uint8_t access_flags;
+    uint8_t pgtt;
   };

   /* VT-d Source-ID Qualifier types */
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 99bb3f42ea..46bde1ad40 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -305,9 +305,21 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
       VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
       uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
       uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
-    return (entry->domain_id == info->domain_id) &&
-            (((entry->gfn & info->mask) == gfn) ||
-             (entry->gfn == gfn_tlb));
+
+    if (entry->domain_id != info->domain_id) {
+        return false;
+    }
+
+    /*
+     * According to spec, IOTLB entries caching first-stage (PGTT=001b) or
+     * nested (PGTT=011b) mapping associated with specified domain-id are
+     * invalidated. Nested isn't supported yet, so only need to check 001b.
+     */
+    if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) {
+        return true;
+    }
+
+    return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb;
   }

   /* Reset all the gen of VTDAddressSpace to zero and set the gen of
@@ -382,7 +394,7 @@ out:
   static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                uint16_t domain_id, hwaddr addr, uint64_t pte,
                                uint8_t access_flags, uint32_t level,
-                             uint32_t pasid)
+                             uint32_t pasid, uint8_t pgtt)
   {
       VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
       struct vtd_iotlb_key *key = g_malloc(sizeof(*key));
@@ -400,6 +412,7 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
       entry->access_flags = access_flags;
       entry->mask = vtd_pt_level_page_mask(level);
       entry->pasid = pasid;
+    entry->pgtt = pgtt;

       key->gfn = gfn;
       key->sid = source_id;
@@ -2069,7 +2082,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
       bool is_fpd_set = false;
       bool reads = true;
       bool writes = true;
-    uint8_t access_flags;
+    uint8_t access_flags, pgtt;
       bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable;
       VTDIOTLBEntry *iotlb_entry;

@@ -2177,9 +2190,11 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
       if (s->scalable_modern && s->root_scalable) {
           ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level,
                                      &reads, &writes, s->aw_bits, pasid);
+        pgtt = VTD_SM_PASID_ENTRY_FLT;
       } else {
           ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level,
                                      &reads, &writes, s->aw_bits, pasid);
+        pgtt = VTD_SM_PASID_ENTRY_SLT;
       }
       if (ret_fr) {
           vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
@@ -2190,7 +2205,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
       page_mask = vtd_pt_level_page_mask(level);
       access_flags = IOMMU_ACCESS_FLAG(reads, writes);
       vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid),
-                     addr, pte, access_flags, level, pasid);
+                     addr, pte, access_flags, level, pasid, pgtt);
   out:
       vtd_iommu_unlock(s);
       entry->iova = addr & page_mask;
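
For context, the predicate above is applied from the page-selective
invalidation path by walking the IOTLB hash table, roughly like this
(a sketch of the existing calling code, not part of this patch; the
function name vtd_iotlb_page_invalidate, the s->iotlb member and the
exact locking are assumptions):

    static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                          hwaddr addr, uint8_t am)
    {
        VTDIOTLBPageInvInfo info;

        info.domain_id = domain_id;
        info.addr = addr;
        info.mask = ~((1ULL << am) - 1);  /* am: address-mask order from the descriptor */

        vtd_iommu_lock(s);
        /* vtd_hash_remove_by_page() now also honours the cached PGTT type */
        g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
        vtd_iommu_unlock(s);
    }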

--
Regards,
Yi Liu


