qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v2 4/5] vdpa: Add virtio-net mac address via CVQ at start


From: Eugenio Perez Martin
Subject: Re: [PATCH v2 4/5] vdpa: Add virtio-net mac address via CVQ at start
Date: Fri, 22 Jul 2022 09:02:14 +0200

On Fri, Jul 22, 2022 at 4:29 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Tue, Jul 19, 2022 at 12:30 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > This is needed so the destination vdpa device sees the same state as the
> > guest set in the source.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  include/net/vhost-vdpa.h |  1 +
> >  hw/net/vhost_net.c       |  8 +++++
> >  net/vhost-vdpa.c         | 64 ++++++++++++++++++++++++++++++++++++++--
> >  3 files changed, 71 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h
> > index b81f9a6f2a..38d65e845d 100644
> > --- a/include/net/vhost-vdpa.h
> > +++ b/include/net/vhost-vdpa.h
> > @@ -15,6 +15,7 @@
> >  #define TYPE_VHOST_VDPA "vhost-vdpa"
> >
> >  struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc);
> > +int vhost_vdpa_start(NetClientState *nc);
> >
> >  extern const int vdpa_feature_bits[];
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index ccac5b7a64..f9cebd9716 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -274,6 +274,13 @@ static int vhost_net_start_one(struct vhost_net *net,
> >              }
> >          }
> >      }
> > +
> > +    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
> > +        r = vhost_vdpa_start(net->nc);
> > +        if (r < 0) {
> > +            goto fail;
> > +        }
> > +    }
>
> This seems tricky, I wonder if we can do this via NetClientInfo
> instead of directly via the vhost layer?
>

Route it via net->nc->info->start()? I think it's better, yes. It was
how it was done before [1].

vhost_kernel could also call to vhost_net_set_backend there.

> Note that the virtio-net has several places that check VDPA backend
> explicitly. This is suboptimal, I will post patch to hide them via
> NetClientInfo.
>
> >      return 0;
> >  fail:
> >      file.fd = -1;
> > @@ -373,6 +380,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState 
> > *ncs,
> >          r = vhost_net_start_one(get_vhost_net(peer), dev);
> >
> >          if (r < 0) {
> > +            vhost_net_stop_one(get_vhost_net(peer), dev);
>
> This does not seem correct. vhost_net_start_one() failing means the
> device is not started, so stopping it again seems wrong.
>

You're right, I think the rebase process put it here by mistake.

> >              goto err_start;
> >          }
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 533bd9f680..84e90f067a 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -342,9 +342,12 @@ static virtio_net_ctrl_ack 
> > vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
> >      virtio_net_ctrl_ack status;
> >      size_t dev_written;
> >      int r;
> > -    void *unused = (void *)1;
> >
> > -    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, unused);
> > +    /*
> > +     * Add a fake non-NULL VirtQueueElement since we'll remove before SVQ
> > +     * event loop can get it.
> > +     */
> > +    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, (void 
> > *)1);
>
> Any reason we can't simply pass NULL as the last parameter for 
> vhost_svq_add()?
>

Since we're being more similar to the kernel, we cannot pass NULL.
NULL as "data" would mean that there are no descriptors there.

The kernel passes the virtnet_info, but it's totally unused as you're
polling for receiving the only buffer it introduced previously. We
can:
a) Put whatever value we want here (svq? dev_written? any non-NULL value works).
b) Delete this argument and use a sentinel value in SVQ to mark the
buffers not injected by the guest (separating the code from the kernel
one).

Thanks!

[1] 
https://patchwork.kernel.org/project/qemu-devel/patch/20220413163206.1958254-21-eperezma@redhat.com/

> Thanks
>
> >      if (unlikely(r != 0)) {
> >          if (unlikely(r == -ENOSPC)) {
> >              qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device 
> > queue\n",
> > @@ -372,6 +375,63 @@ static virtio_net_ctrl_ack 
> > vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
> >      return VIRTIO_NET_OK;
> >  }
> >
> > +int vhost_vdpa_start(NetClientState *nc)
> > +{
> > +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > +    struct vhost_vdpa *v = &s->vhost_vdpa;
> > +    VirtIONet *n = VIRTIO_NET(v->dev->vdev);
> > +    uint64_t features = v->dev->vdev->host_features;
> > +    VhostShadowVirtqueue *svq;
> > +
> > +    if (!v->shadow_vqs_enabled) {
> > +        return 0;
> > +    }
> > +
> > +    if (v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
> > +        /* Only interested in CVQ */
> > +        return 0;
> > +    }
> > +
> > +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> > +
> > +    svq = g_ptr_array_index(v->shadow_vqs, 0);
> > +    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> > +        const struct virtio_net_ctrl_hdr ctrl = {
> > +            .class = VIRTIO_NET_CTRL_MAC,
> > +            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> > +        };
> > +        uint8_t mac[6];
> > +        const struct iovec out[] = {
> > +            {
> > +                .iov_base = (void *)&ctrl,
> > +                .iov_len = sizeof(ctrl),
> > +            },{
> > +                .iov_base = mac,
> > +                .iov_len = sizeof(mac),
> > +            },
> > +        };
> > +        struct iovec dev_buffers[2] = {
> > +            { .iov_base = s->cvq_cmd_out_buffer },
> > +            { .iov_base = s->cvq_cmd_in_buffer },
> > +        };
> > +        bool ok;
> > +        virtio_net_ctrl_ack state;
> > +
> > +        ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), 
> > dev_buffers);
> > +        if (unlikely(!ok)) {
> > +            return -1;
> > +        }
> > +
> > +        memcpy(mac, n->mac, sizeof(mac));
> > +        state = vhost_vdpa_net_cvq_add(svq, dev_buffers);
> > +        vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
> > +        vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
> > +        return state == VIRTIO_NET_OK ? 0 : 1;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> >  /**
> >   * Do not forward commands not supported by SVQ. Otherwise, the device 
> > could
> >   * accept it and qemu would not know how to update the device model.
> > --
> > 2.31.1
> >
>




reply via email to

[Prev in Thread] Current Thread [Next in Thread]