From: Stefan Hajnoczi
Subject: [PATCH 07/11] util/vhost-user-server: fix memory leak in vu_message_read()
Date: Tue, 22 Sep 2020 17:03:57 +0100

fds[] is leaked when qio_channel_readv_full() fails.

Use vmsg->fds[] instead of keeping a local fds[] array. That way the
existing goto fail path can be reused to clean up the fds. vmsg->fd_num
must be zeroed before the loop to make this safe.
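
Illustrative sketch of the pattern this relies on (standalone, not the
code in util/vhost-user-server.c; Msg, MAX_FDS, recv_step() and
read_msg() are made-up names): fd_num is only incremented after fds
have actually been copied into the message, so a single fail path can
close exactly the descriptors received so far, wherever the loop bails
out.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define MAX_FDS 8                 /* stand-in for G_N_ELEMENTS(vmsg->fds) */

typedef struct {                  /* hypothetical stand-in for VhostUserMsg */
    size_t fd_num;
    int fds[MAX_FDS];
} Msg;

/*
 * Hypothetical transport step: on success may allocate *fds; on error
 * allocates nothing and returns < 0.
 */
int recv_step(int **fds, size_t *nfds, bool *done);

bool read_msg(Msg *msg)
{
    bool done = false;

    msg->fd_num = 0;              /* zero before the loop so fail is always safe */

    while (!done) {
        int *fds = NULL;
        size_t nfds = 0;

        if (recv_step(&fds, &nfds, &done) < 0) {
            goto fail;            /* nothing was allocated, nothing to free */
        }
        if (nfds > 0) {
            if (msg->fd_num + nfds > MAX_FDS) {
                free(fds);        /* drop the temporary array ... */
                goto fail;        /* ... and close the fds stored so far */
            }
            memcpy(msg->fds + msg->fd_num, fds, nfds * sizeof(msg->fds[0]));
            msg->fd_num += nfds;  /* bump the count only after the copy */
            free(fds);
        }
    }
    return true;

fail:
    for (size_t i = 0; i < msg->fd_num; i++) {
        close(msg->fds[i]);       /* closes exactly the stored fds */
    }
    msg->fd_num = 0;
    return false;
}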

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/vhost-user-server.c | 50 ++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 27 deletions(-)

diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 892815827d..5a60e2ca2a 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -100,21 +100,11 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
     };
     int rc, read_bytes = 0;
     Error *local_err = NULL;
-    /*
-     * Store fds/nfds returned from qio_channel_readv_full into
-     * temporary variables.
-     *
-     * VhostUserMsg is a packed structure, gcc will complain about passing
-     * pointer to a packed structure member if we pass &VhostUserMsg.fd_num
-     * and &VhostUserMsg.fds directly when calling qio_channel_readv_full,
-     * thus two temporary variables nfds and fds are used here.
-     */
-    size_t nfds = 0, nfds_t = 0;
     const size_t max_fds = G_N_ELEMENTS(vmsg->fds);
-    int *fds_t = NULL;
     VuServer *server = container_of(vu_dev, VuServer, vu_dev);
     QIOChannel *ioc = server->ioc;
 
+    vmsg->fd_num = 0;
     if (!ioc) {
         error_report_err(local_err);
         goto fail;
@@ -122,41 +112,47 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
 
     assert(qemu_in_coroutine());
     do {
+        size_t nfds = 0;
+        int *fds = NULL;
+
         /*
          * qio_channel_readv_full may have short reads, keeping calling it
          * until getting VHOST_USER_HDR_SIZE or 0 bytes in total
          */
-        rc = qio_channel_readv_full(ioc, &iov, 1, &fds_t, &nfds_t, &local_err);
+        rc = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, &local_err);
         if (rc < 0) {
             if (rc == QIO_CHANNEL_ERR_BLOCK) {
+                assert(local_err == NULL);
                 qio_channel_yield(ioc, G_IO_IN);
                 continue;
             } else {
                 error_report_err(local_err);
-                return false;
+                goto fail;
             }
         }
-        read_bytes += rc;
-        if (nfds_t > 0) {
-            if (nfds + nfds_t > max_fds) {
+
+        if (nfds > 0) {
+            if (vmsg->fd_num + nfds > max_fds) {
                 error_report("A maximum of %zu fds are allowed, "
                              "however got %lu fds now",
-                             max_fds, nfds + nfds_t);
+                             max_fds, vmsg->fd_num + nfds);
+                g_free(fds);
                 goto fail;
             }
-            memcpy(vmsg->fds + nfds, fds_t,
-                   nfds_t *sizeof(vmsg->fds[0]));
-            nfds += nfds_t;
-            g_free(fds_t);
+            memcpy(vmsg->fds + vmsg->fd_num, fds, nfds * sizeof(vmsg->fds[0]));
+            vmsg->fd_num += nfds;
+            g_free(fds);
         }
-        if (read_bytes == VHOST_USER_HDR_SIZE || rc == 0) {
-            break;
+
+        if (rc == 0) { /* socket closed */
+            goto fail;
         }
-        iov.iov_base = (char *)vmsg + read_bytes;
-        iov.iov_len = VHOST_USER_HDR_SIZE - read_bytes;
-    } while (true);
 
-    vmsg->fd_num = nfds;
+        iov.iov_base += rc;
+        iov.iov_len -= rc;
+        read_bytes += rc;
+    } while (read_bytes != VHOST_USER_HDR_SIZE);
+
     /* qio_channel_readv_full will make socket fds blocking, unblock them */
     vmsg_unblock_fds(vmsg);
     if (vmsg->size > sizeof(vmsg->payload)) {
-- 
2.26.2

