
Re: [PATCH v4 04/14] util/dsa: Implement DSA task enqueue and dequeue.


From: Fabiano Rosas
Subject: Re: [PATCH v4 04/14] util/dsa: Implement DSA task enqueue and dequeue.
Date: Thu, 25 Apr 2024 17:55:08 -0300

Hao Xiang <hao.xiang@linux.dev> writes:

> * Use a safe thread queue for DSA task enqueue/dequeue.
> * Implement DSA task submission.
> * Implement DSA batch task submission.
>
> Signed-off-by: Hao Xiang <hao.xiang@linux.dev>
> ---
>  include/qemu/dsa.h |  28 +++++++
>  util/dsa.c         | 201 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 229 insertions(+)
>
> diff --git a/include/qemu/dsa.h b/include/qemu/dsa.h
> index f15c05ee85..37cae8d9d2 100644
> --- a/include/qemu/dsa.h
> +++ b/include/qemu/dsa.h
> @@ -13,6 +13,34 @@
>  #include <linux/idxd.h>
>  #include "x86intrin.h"
>  
> +typedef enum DsaTaskType {
> +    DSA_TASK = 0,
> +    DSA_BATCH_TASK
> +} DsaTaskType;
> +
> +typedef enum DsaTaskStatus {
> +    DSA_TASK_READY = 0,
> +    DSA_TASK_PROCESSING,
> +    DSA_TASK_COMPLETION
> +} DsaTaskStatus;
> +
> +typedef void (*dsa_completion_fn)(void *);
> +
> +typedef struct dsa_batch_task {
> +    struct dsa_hw_desc batch_descriptor;
> +    struct dsa_hw_desc *descriptors;
> +    struct dsa_completion_record batch_completion __attribute__((aligned(32)));
> +    struct dsa_completion_record *completions;
> +    struct dsa_device_group *group;
> +    struct dsa_device *device;
> +    dsa_completion_fn completion_callback;
> +    QemuSemaphore sem_task_complete;
> +    DsaTaskType task_type;
> +    DsaTaskStatus status;
> +    int batch_size;
> +    QSIMPLEQ_ENTRY(dsa_batch_task) entry;
> +} dsa_batch_task;
> +
>  /**
>   * @brief Initializes DSA devices.
>   *
> diff --git a/util/dsa.c b/util/dsa.c
> index 05bbf8e31a..75739a1af6 100644
> --- a/util/dsa.c
> +++ b/util/dsa.c
> @@ -244,6 +244,205 @@ dsa_device_group_get_next_device(struct dsa_device_group *group)
>      return &group->dsa_devices[current];
>  }
>  
> +/**
> + * @brief Empties out the DSA task queue.
> + *
> + * @param group A pointer to the DSA device group.
> + */
> +static void
> +dsa_empty_task_queue(struct dsa_device_group *group)
> +{
> +    qemu_mutex_lock(&group->task_queue_lock);
> +    dsa_task_queue *task_queue = &group->task_queue;
> +    while (!QSIMPLEQ_EMPTY(task_queue)) {
> +        QSIMPLEQ_REMOVE_HEAD(task_queue, entry);
> +    }
> +    qemu_mutex_unlock(&group->task_queue_lock);
> +}
> +
> +/**
> + * @brief Adds a task to the DSA task queue.
> + *
> + * @param group A pointer to the DSA device group.
> + * @param context A pointer to the DSA task to enqueue.

This is wrong^ - the parameter is named 'task', not 'context'.

> + *
> + * @return int Zero if successful, otherwise a proper error code.
> + */
> +static int
> +dsa_task_enqueue(struct dsa_device_group *group,
> +                 struct dsa_batch_task *task)
> +{
> +    dsa_task_queue *task_queue = &group->task_queue;
> +    QemuMutex *task_queue_lock = &group->task_queue_lock;
> +    QemuCond *task_queue_cond = &group->task_queue_cond;

It's more idiomatic not to hold any of these in a variable; just access
them through 'group' directly.
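
Something like this (untested, same logic as your version, just without
the locals):

static int
dsa_task_enqueue(struct dsa_device_group *group,
                 struct dsa_batch_task *task)
{
    bool notify = false;

    qemu_mutex_lock(&group->task_queue_lock);

    if (!group->running) {
        error_report("DSA: Tried to queue task to stopped device queue.");
        qemu_mutex_unlock(&group->task_queue_lock);
        return -1;
    }

    /* The queue is empty. This enqueue operation is a 0->1 transition. */
    if (QSIMPLEQ_EMPTY(&group->task_queue)) {
        notify = true;
    }

    QSIMPLEQ_INSERT_TAIL(&group->task_queue, task, entry);

    /* We need to notify the waiter for 0->1 transitions. */
    if (notify) {
        qemu_cond_signal(&group->task_queue_cond);
    }

    qemu_mutex_unlock(&group->task_queue_lock);

    return 0;
}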

> +
> +    bool notify = false;
> +
> +    qemu_mutex_lock(task_queue_lock);
> +
> +    if (!group->running) {
> +        error_report("DSA: Tried to queue task to stopped device queue.");
> +        qemu_mutex_unlock(task_queue_lock);
> +        return -1;
> +    }
> +
> +    /* The queue is empty. This enqueue operation is a 0->1 transition. */
> +    if (QSIMPLEQ_EMPTY(task_queue)) {
> +        notify = true;
> +    }
> +
> +    QSIMPLEQ_INSERT_TAIL(task_queue, task, entry);
> +
> +    /* We need to notify the waiter for 0->1 transitions. */
> +    if (notify) {
> +        qemu_cond_signal(task_queue_cond);
> +    }
> +
> +    qemu_mutex_unlock(task_queue_lock);
> +
> +    return 0;
> +}
> +
> +/**
> + * @brief Takes a DSA task out of the task queue.
> + *
> + * @param group A pointer to the DSA device group.
> + * @return dsa_batch_task* The DSA task being dequeued.
> + */
> +__attribute__((unused))
> +static struct dsa_batch_task *
> +dsa_task_dequeue(struct dsa_device_group *group)
> +{
> +    struct dsa_batch_task *task = NULL;
> +    dsa_task_queue *task_queue = &group->task_queue;
> +    QemuMutex *task_queue_lock = &group->task_queue_lock;
> +    QemuCond *task_queue_cond = &group->task_queue_cond;

Same here.
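
Likewise for the dequeue side (untested sketch):

static struct dsa_batch_task *
dsa_task_dequeue(struct dsa_device_group *group)
{
    struct dsa_batch_task *task = NULL;

    qemu_mutex_lock(&group->task_queue_lock);

    while (true) {
        if (!group->running) {
            goto exit;
        }
        task = QSIMPLEQ_FIRST(&group->task_queue);
        if (task != NULL) {
            break;
        }
        qemu_cond_wait(&group->task_queue_cond, &group->task_queue_lock);
    }

    QSIMPLEQ_REMOVE_HEAD(&group->task_queue, entry);

exit:
    qemu_mutex_unlock(&group->task_queue_lock);
    return task;
}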

> +
> +    qemu_mutex_lock(task_queue_lock);
> +
> +    while (true) {
> +        if (!group->running) {
> +            goto exit;
> +        }
> +        task = QSIMPLEQ_FIRST(task_queue);
> +        if (task != NULL) {
> +            break;
> +        }
> +        qemu_cond_wait(task_queue_cond, task_queue_lock);
> +    }
> +
> +    QSIMPLEQ_REMOVE_HEAD(task_queue, entry);
> +
> +exit:
> +    qemu_mutex_unlock(task_queue_lock);
> +    return task;
> +}
> +
> +/**
> + * @brief Submits a DSA work item to the device work queue.
> + *
> + * @param wq A pointer to the DSA work queue's device memory.
> + * @param descriptor A pointer to the DSA work item descriptor.
> + *
> + * @return Zero if successful, non-zero otherwise.
> + */
> +static int
> +submit_wi_int(void *wq, struct dsa_hw_desc *descriptor)
> +{
> +    uint64_t retry = 0;
> +
> +    _mm_sfence();
> +
> +    while (true) {
> +        if (_enqcmd(wq, descriptor) == 0) {
> +            break;
> +        }
> +        retry++;
> +        if (retry > max_retry_count) {
> +            error_report("Submit work retry %lu times.", retry);
> +            return -1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/**
> + * @brief Synchronously submits a DSA work item to the
> + *        device work queue.
> + *
> + * @param wq A pointer to the DSA worjk queue's device memory.

s/worjk/work/

> + * @param descriptor A pointer to the DSA work item descriptor.
> + *
> + * @return int Zero if successful, non-zero otherwise.
> + */
> +__attribute__((unused))
> +static int
> +submit_wi(void *wq, struct dsa_hw_desc *descriptor)
> +{
> +    return submit_wi_int(wq, descriptor);
> +}
> +
> +/**
> + * @brief Asynchronously submits a DSA work item to the
> + *        device work queue.
> + *
> + * @param task A pointer to the buffer zero task.
> + *
> + * @return int Zero if successful, non-zero otherwise.
> + */
> +__attribute__((unused))
> +static int
> +submit_wi_async(struct dsa_batch_task *task)
> +{
> +    struct dsa_device_group *device_group = task->group;
> +    struct dsa_device *device_instance = task->device;
> +    int ret;
> +
> +    assert(task->task_type == DSA_TASK);
> +
> +    task->status = DSA_TASK_PROCESSING;
> +
> +    ret = submit_wi_int(device_instance->work_queue,
> +                        &task->descriptors[0]);
> +    if (ret != 0) {
> +        return ret;
> +    }
> +
> +    return dsa_task_enqueue(device_group, task);
> +}
> +
> +/**
> + * @brief Asynchronously submits a DSA batch work item to the
> + *        device work queue.
> + *
> + * @param dsa_batch_task A pointer to the batch buffer zero task.

s/buffer zero //

> + *
> + * @return int Zero if successful, non-zero otherwise.
> + */
> +__attribute__((unused))
> +static int
> +submit_batch_wi_async(struct dsa_batch_task *batch_task)
> +{
> +    struct dsa_device_group *device_group = batch_task->group;
> +    struct dsa_device *device_instance = batch_task->device;
> +    int ret;
> +
> +    assert(batch_task->task_type == DSA_BATCH_TASK);
> +    assert(batch_task->batch_descriptor.desc_count <= batch_task->batch_size);
> +    assert(batch_task->status == DSA_TASK_READY);
> +
> +    batch_task->status = DSA_TASK_PROCESSING;
> +
> +    ret = submit_wi_int(device_instance->work_queue,
> +                        &batch_task->batch_descriptor);
> +    if (ret != 0) {
> +        return ret;
> +    }
> +
> +    return dsa_task_enqueue(device_group, batch_task);
> +}
> +
>  /**
>   * @brief Check if DSA is running.
>   *
> @@ -300,6 +499,8 @@ void dsa_stop(void)
>      if (!group->running) {
>          return;
>      }
> +
> +    dsa_empty_task_queue(group);
>  }
>  
>  /**


