From: Eugenio Pérez
Subject: [RFC PATCH v5 17/26] vhost: Route host->guest notification through shadow virtqueue
Date: Fri, 29 Oct 2021 20:35:16 +0200

This makes QEMU aware of the buffers the device has used, so it can
write their contents to guest memory if needed.
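
For context, the forwarding itself is just an eventfd relay. Below is a
minimal standalone sketch (plain Linux eventfds, not QEMU code; the
forward_call, dev_call and guest_call names are illustrative only) of what
vhost_svq_handle_call does with QEMU's EventNotifiers: drain the device's
call notification and, only if one was actually pending, re-raise it on
the notifier the guest listens on.

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    /* Relay one host->guest notification from the device's call eventfd
     * to the eventfd the guest listens on. */
    static void forward_call(int dev_call_fd, int guest_call_fd)
    {
        uint64_t cnt;

        /* Drain the device notification; if nothing was pending, do not
         * forward a spurious event (mirrors event_notifier_test_and_clear). */
        if (read(dev_call_fd, &cnt, sizeof(cnt)) != sizeof(cnt)) {
            return;
        }

        /* Re-raise it towards the guest (mirrors event_notifier_set). */
        cnt = 1;
        (void)write(guest_call_fd, &cnt, sizeof(cnt));
    }

    int main(void)
    {
        int dev_call = eventfd(0, EFD_NONBLOCK);
        int guest_call = eventfd(0, EFD_NONBLOCK);
        uint64_t v = 1;

        (void)write(dev_call, &v, sizeof(v)); /* device reports used buffers */
        forward_call(dev_call, guest_call);   /* SVQ relays the notification */

        close(dev_call);
        close(guest_call);
        return 0;
    }
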

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.c | 15 +++++++++++++++
 hw/virtio/vhost-vdpa.c             | 13 +++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 6535eefccd..77916d2fed 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -75,6 +75,19 @@ void vhost_svq_set_host_mr_notifier(VhostShadowVirtqueue *svq, void *addr)
     svq->host_notifier_mr = addr;
 }
 
+/* Forward vhost notifications */
+static void vhost_svq_handle_call(EventNotifier *n)
+{
+    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
+                                             hdev_call);
+
+    if (unlikely(!event_notifier_test_and_clear(n))) {
+        return;
+    }
+
+    event_notifier_set(&svq->svq_call);
+}
+
 /*
  * Obtain the SVQ call notifier, where vhost device notifies SVQ that there
  * exists pending used buffers.
@@ -200,6 +213,7 @@ VhostShadowVirtqueue *vhost_svq_new(struct vhost_dev *dev, int idx)
     }
 
     svq->vq = virtio_get_queue(dev->vdev, vq_idx);
+    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
@@ -215,6 +229,7 @@ err_init_hdev_kick:
 void vhost_svq_free(VhostShadowVirtqueue *vq)
 {
     event_notifier_cleanup(&vq->hdev_kick);
+    event_notifier_set_handler(&vq->hdev_call, NULL);
     event_notifier_cleanup(&vq->hdev_call);
     g_free(vq);
 }
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 89d77f3452..c2580693b3 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -840,10 +840,14 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx,
     struct vhost_vring_file vhost_kick_file = {
         .index = vq_index,
     };
+    struct vhost_vring_file vhost_call_file = {
+        .index = idx + dev->vq_index,
+    };
     int r;
 
     if (svq_mode) {
         const EventNotifier *vhost_kick = vhost_svq_get_dev_kick_notifier(svq);
+        const EventNotifier *vhost_call = vhost_svq_get_svq_call_notifier(svq);
 
         if (n->addr) {
             r = virtio_queue_set_host_notifier_mr(dev->vdev, idx, &n->mr,
@@ -856,9 +860,12 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx,
             assert(r == 0);
             vhost_svq_set_host_mr_notifier(svq, n->addr);
         }
+
+        vhost_svq_set_guest_call_notifier(svq, v->call_fd[idx]);
         vhost_svq_start(dev, idx, svq, v->kick_fd[idx]);
 
         vhost_kick_file.fd = event_notifier_get_fd(vhost_kick);
+        vhost_call_file.fd = event_notifier_get_fd(vhost_call);
     } else {
         vhost_svq_stop(dev, idx, svq);
 
@@ -872,6 +879,7 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx,
             assert(r == 0);
         }
         vhost_kick_file.fd = v->kick_fd[idx];
+        vhost_call_file.fd = v->call_fd[idx];
     }
 
     r = vhost_vdpa_set_vring_dev_kick(dev, &vhost_kick_file);
@@ -879,6 +887,11 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx,
         error_setg_errno(errp, -r, "vhost_vdpa_set_vring_kick failed");
         return false;
     }
+    r = vhost_vdpa_set_vring_dev_call(dev, &vhost_call_file);
+    if (unlikely(r)) {
+        error_setg_errno(errp, -r, "vhost_vdpa_set_vring_call failed");
+        return false;
+    }
 
     return true;
 }
-- 
2.27.0



