qemu-devel

Re: [PATCH v4 6/7] vdpa: Add virtio-net mac address via CVQ at start


From: Jason Wang
Subject: Re: [PATCH v4 6/7] vdpa: Add virtio-net mac address via CVQ at start
Date: Mon, 25 Jul 2022 17:32:25 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:91.0) Gecko/20100101 Thunderbird/91.11.0


On 2022/7/22 19:12, Eugenio Pérez wrote:
This is needed so the destination vdpa device sees the same state the
guest set in the source.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
  net/vhost-vdpa.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++
  1 file changed, 61 insertions(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 61516b1432..3e15a42c35 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -365,10 +365,71 @@ static virtio_net_ctrl_ack vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
      return VIRTIO_NET_OK;
  }
+static int vhost_vdpa_net_start(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+    VirtIONet *n;
+    uint64_t features;
+    VhostShadowVirtqueue *svq;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (!v->shadow_vqs_enabled) {
+        return 0;
+    }
+
+    if (v->dev->nvqs != 1 &&
+        v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
+        /* Only interested in CVQ */
+        return 0;
+    }


I'd have a dedicated NetClientInfo for cvq.

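For illustration, a dedicated info struct could mirror net_vhost_vdpa_info but carry the CVQ-only .start hook, and be registered only for the NetClientState that backs the control virtqueue. This is just a sketch; the name net_vhost_vdpa_cvq_info is a placeholder, not something in this series:

/*
 * Sketch: a CVQ-only NetClientInfo, so .start never runs on data queues.
 * Fields mirror net_vhost_vdpa_info below; only .start is CVQ-specific.
 */
static NetClientInfo net_vhost_vdpa_cvq_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_start,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
};

With that, the "Only interested in CVQ" nvqs/vq_index check below could go away, since the callback would only ever be attached to the CVQ's NetClientState.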

+
+    n = VIRTIO_NET(v->dev->vdev);
+    features = v->dev->vdev->host_features;
+    svq = g_ptr_array_index(v->shadow_vqs, 0);
+    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+        const struct virtio_net_ctrl_hdr ctrl = {
+            .class = VIRTIO_NET_CTRL_MAC,
+            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+        };
+        uint8_t mac[6];
+        const struct iovec out[] = {
+            {
+                .iov_base = (void *)&ctrl,
+                .iov_len = sizeof(ctrl),
+            },{
+                .iov_base = mac,
+                .iov_len = sizeof(mac),
+            },
+        };
+        struct iovec dev_buffers[2] = {
+            { .iov_base = s->cvq_cmd_out_buffer },
+            { .iov_base = s->cvq_cmd_in_buffer },
+        };
+        bool ok;
+        virtio_net_ctrl_ack state;
+
+        ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), dev_buffers);


To speed up the state recovery, can we map those buffers during svq start?

Thanks

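A rough sketch of that idea, assuming a map helper shaped like the unmap one already used in this patch; vhost_vdpa_cvq_map_buf, vhost_vdpa_net_cvq_prepare and the buffer length are placeholders, not this series' actual API:

static int vhost_vdpa_net_cvq_prepare(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const size_t len = 4096; /* assumed size of each CVQ bounce buffer */
    int r;

    /* Map the fixed out/in command buffers once, when the SVQ starts */
    r = vhost_vdpa_cvq_map_buf(v, s->cvq_cmd_out_buffer, len, false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(v, s->cvq_cmd_in_buffer, len, true);
    if (unlikely(r < 0)) {
        /* Roll back the out-buffer mapping on failure */
        vhost_vdpa_cvq_unmap_buf(v, s->cvq_cmd_out_buffer);
    }

    return r;
}

Then vhost_vdpa_net_start() would only need to fill the buffers and call vhost_vdpa_net_cvq_add(), with the unmapping moved to the SVQ stop path.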

+        if (unlikely(!ok)) {
+            return -1;
+        }
+
+        memcpy(mac, n->mac, sizeof(mac));
+        state = vhost_vdpa_net_cvq_add(svq, dev_buffers);
+        vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
+        vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
+        return state == VIRTIO_NET_OK ? 0 : 1;
+    }
+
+    return 0;
+}
+
  static NetClientInfo net_vhost_vdpa_info = {
          .type = NET_CLIENT_DRIVER_VHOST_VDPA,
          .size = sizeof(VhostVDPAState),
          .receive = vhost_vdpa_receive,
+        .start = vhost_vdpa_net_start,
          .cleanup = vhost_vdpa_cleanup,
          .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
          .has_ufo = vhost_vdpa_has_ufo,



