Re: [RFC PATCH v9 09/23] vhost: Add SVQElement


From: Jason Wang
Subject: Re: [RFC PATCH v9 09/23] vhost: Add SVQElement
Date: Mon, 11 Jul 2022 17:00:43 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:91.0) Gecko/20100101 Thunderbird/91.11.0


On 2022/7/7 02:39, Eugenio Pérez wrote:
This will allow SVQ to add metadata to the different queue elements. To
keep the changes simple, this patch only stores the actual element.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
  hw/virtio/vhost-shadow-virtqueue.h |  8 ++++--
  hw/virtio/vhost-shadow-virtqueue.c | 41 ++++++++++++++++++++----------
  2 files changed, 33 insertions(+), 16 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 0fbdd69153..e434dc63b0 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
  #include "standard-headers/linux/vhost_types.h"
  #include "hw/virtio/vhost-iova-tree.h"
+typedef struct SVQElement {
+    VirtQueueElement *elem;
+} SVQElement;
+
  typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
  typedef int (*ShadowVirtQueueStart)(VhostShadowVirtqueue *svq,
                                      void *opaque);
@@ -55,8 +59,8 @@ typedef struct VhostShadowVirtqueue {
      /* IOVA mapping */
      VhostIOVATree *iova_tree;
-    /* Map for use the guest's descriptors */
-    VirtQueueElement **ring_id_maps;
+    /* Each element context */
+    SVQElement *ring_id_maps;

      /* Next VirtQueue element that guest made available */
      VirtQueueElement *next_guest_avail_elem;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 46d3c1d74f..913bca8769 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -237,7 +237,7 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
          return false;
      }

-    svq->ring_id_maps[qemu_head] = elem;
+    svq->ring_id_maps[qemu_head].elem = elem;
      return true;
  }
@@ -385,15 +385,25 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
      return i;
  }
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
-                                           uint32_t *len)
+static bool vhost_svq_is_empty_elem(SVQElement elem)
+{
+    return elem.elem == NULL;
+}
+
+static SVQElement vhost_svq_empty_elem(void)
+{
+    return (SVQElement){};
+}


I wonder what the benefit of this is, compared to passing a pointer to SVQElement and using memset().
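
Something like the following, e.g. (completely untested sketch, just to show the idea; the lookup in the middle would stay as in this patch):

    static bool vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                  SVQElement *svq_elem, uint32_t *len)
    {
        /* memset() gives the "empty" state instead of vhost_svq_empty_elem() */
        memset(svq_elem, 0, sizeof(*svq_elem));

        if (!vhost_svq_more_used(svq)) {
            return false;
        }

        /* ... rest of the lookup as in this patch ... */

        *svq_elem = svq->ring_id_maps[used_elem.id];
        return true;
    }

and in vhost_svq_flush():

            SVQElement svq_elem;
            if (!vhost_svq_get_buf(svq, &svq_elem, &len)) {
                break;
            }
            elem = g_steal_pointer(&svq_elem.elem);

That would avoid returning the struct by value and the two empty/is_empty helpers.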


+
+static SVQElement vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
  {
      const vring_used_t *used = svq->vring.used;
      vring_used_elem_t used_elem;
+    SVQElement svq_elem = vhost_svq_empty_elem();
      uint16_t last_used, last_used_chain, num;

      if (!vhost_svq_more_used(svq)) {
-        return NULL;
+        return svq_elem;
      }

      /* Only get used array entries after they have been exposed by dev */
@@ -406,24 +416,25 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
      if (unlikely(used_elem.id >= svq->vring.num)) {
          qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
                        svq->vdev->name, used_elem.id);
-        return NULL;
+        return svq_elem;
      }

-    if (unlikely(!svq->ring_id_maps[used_elem.id])) {
+    svq_elem = svq->ring_id_maps[used_elem.id];
+    svq->ring_id_maps[used_elem.id] = vhost_svq_empty_elem();
+    if (unlikely(vhost_svq_is_empty_elem(svq_elem))) {


Any reason we can't simply assign NULL to ring_id_maps[used_elem.id].elem?
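
I.e., keep the lookup as in this patch but just clear the pointer (untested sketch):

    svq_elem = svq->ring_id_maps[used_elem.id];
    svq->ring_id_maps[used_elem.id].elem = NULL;
    if (unlikely(!svq_elem.elem)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "Device %s says index %u is used, but it was not available",
            svq->vdev->name, used_elem.id);
        return svq_elem;
    }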

Thanks


          qemu_log_mask(LOG_GUEST_ERROR,
              "Device %s says index %u is used, but it was not available",
              svq->vdev->name, used_elem.id);
-        return NULL;
+        return svq_elem;
      }

-    num = svq->ring_id_maps[used_elem.id]->in_num +
-          svq->ring_id_maps[used_elem.id]->out_num;
+    num = svq_elem.elem->in_num + svq_elem.elem->out_num;
      last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
      svq->desc_next[last_used_chain] = svq->free_head;
      svq->free_head = used_elem.id;

      *len = used_elem.len;
-    return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
+    return svq_elem;
  }

  /**
@@ -454,6 +465,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
          vhost_svq_disable_notification(svq);
          while (true) {
              uint32_t len;
+            SVQElement svq_elem;
              g_autofree VirtQueueElement *elem = NULL;

            if (unlikely(i >= svq->vring.num)) {
@@ -464,11 +476,12 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                  return;
              }

-            elem = vhost_svq_get_buf(svq, &len);
-            if (!elem) {
+            svq_elem = vhost_svq_get_buf(svq, &len);
+            if (vhost_svq_is_empty_elem(svq_elem)) {
                  break;
              }

+            elem = g_steal_pointer(&svq_elem.elem);
              virtqueue_fill(vq, elem, len, i++);
          }
@@ -611,7 +624,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
      memset(svq->vring.desc, 0, driver_size);
      svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
      memset(svq->vring.used, 0, device_size);
-    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->ring_id_maps = g_new0(SVQElement, svq->vring.num);
      svq->desc_next = g_new0(uint16_t, svq->vring.num);
      for (unsigned i = 0; i < svq->vring.num - 1; i++) {
          svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -636,7 +649,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)

      for (unsigned i = 0; i < svq->vring.num; ++i) {
          g_autofree VirtQueueElement *elem = NULL;
-        elem = g_steal_pointer(&svq->ring_id_maps[i]);
+        elem = g_steal_pointer(&svq->ring_id_maps[i].elem);
          if (elem) {
              virtqueue_detach_element(svq->vq, elem, 0);
          }



