qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [RFC PATCH v9 19/23] vdpa: Extract get features part from vhost_vdpa


From: Jason Wang
Subject: Re: [RFC PATCH v9 19/23] vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs
Date: Tue, 12 Jul 2022 12:11:30 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:91.0) Gecko/20100101 Thunderbird/91.11.0


在 2022/7/7 02:40, Eugenio Pérez 写道:
Knowing the device features is needed for CVQ SVQ, so SVQ knows whether it
can handle all commands or not. Extract this logic from
vhost_vdpa_get_max_queue_pairs so we can reuse it.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>


Acked-by: Jason Wang <jasowang@redhat.com>


---
  net/vhost-vdpa.c | 30 ++++++++++++++++++++----------
  1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index df1e69ee72..b0158f625e 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -219,20 +219,24 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
      return nc;
  }
-static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp)
+static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
+{
+    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
+    if (ret) {
+        error_setg_errno(errp, errno,
+                         "Fail to query features from vhost-vDPA device");
+    }
+    return ret;
+}
+
+static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
+                                          int *has_cvq, Error **errp)
  {
      unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
      g_autofree struct vhost_vdpa_config *config = NULL;
      __virtio16 *max_queue_pairs;
-    uint64_t features;
      int ret;
-
-    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
-    if (ret) {
-        error_setg(errp, "Fail to query features from vhost-vDPA device");
-        return ret;
-    }
-
      if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
          *has_cvq = 1;
      } else {
@@ -262,10 +266,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                          NetClientState *peer, Error **errp)
  {
      const NetdevVhostVDPAOptions *opts;
+    uint64_t features;
      int vdpa_device_fd;
      g_autofree NetClientState **ncs = NULL;
      NetClientState *nc;
-    int queue_pairs, i, has_cvq = 0;
+    int queue_pairs, r, i, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
      opts = &netdev->u.vhost_vdpa;
@@ -279,7 +284,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
          return -errno;
      }
-
-    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd,
+    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
+    if (r) {
+        return r;
+    }
+
+    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                   &has_cvq, errp);
      if (queue_pairs < 0) {
          qemu_close(vdpa_device_fd);




reply via email to

[Prev in Thread] Current Thread [Next in Thread]