qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 03/24] thread-pool: Rename AIO pool functions to *_aio() and data types to *Aio


From: Cédric Le Goater
Subject: Re: [PATCH v3 03/24] thread-pool: Rename AIO pool functions to *_aio() and data types to *Aio
Date: Tue, 26 Nov 2024 17:26:49 +0100
User-agent: Mozilla Thunderbird

On 11/17/24 20:19, Maciej S. Szmigiero wrote:
From: "Maciej S. Szmigiero" <maciej.szmigiero@oracle.com>

These names conflict with ones used by future generic thread pool
equivalents.
Generic names should belong to the generic pool type, not specific (AIO)
type.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>


Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.


---
  include/block/aio.h         |  8 ++---
  include/block/thread-pool.h |  8 ++---
  util/async.c                |  6 ++--
  util/thread-pool.c          | 58 ++++++++++++++++++-------------------
  util/trace-events           |  4 +--
  5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index 43883a8a33a8..b2ab3514de23 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -54,7 +54,7 @@ typedef void QEMUBHFunc(void *opaque);
  typedef bool AioPollFn(void *opaque);
  typedef void IOHandler(void *opaque);
-struct ThreadPool;
+struct ThreadPoolAio;
  struct LinuxAioState;
  typedef struct LuringState LuringState;
@@ -207,7 +207,7 @@ struct AioContext {
      /* Thread pool for performing work and receiving completion callbacks.
       * Has its own locking.
       */
-    struct ThreadPool *thread_pool;
+    struct ThreadPoolAio *thread_pool;
#ifdef CONFIG_LINUX_AIO
      struct LinuxAioState *linux_aio;
@@ -500,8 +500,8 @@ void aio_set_event_notifier_poll(AioContext *ctx,
   */
  GSource *aio_get_g_source(AioContext *ctx);
-/* Return the ThreadPool bound to this AioContext */
-struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
+/* Return the ThreadPoolAio bound to this AioContext */
+struct ThreadPoolAio *aio_get_thread_pool(AioContext *ctx);
/* Setup the LinuxAioState bound to this AioContext */
  struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 4f6694026123..6f27eb085b45 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -24,10 +24,10 @@
typedef int ThreadPoolFunc(void *opaque);

-typedef struct ThreadPool ThreadPool;
+typedef struct ThreadPoolAio ThreadPoolAio;
-ThreadPool *thread_pool_new(struct AioContext *ctx);
-void thread_pool_free(ThreadPool *pool);
+ThreadPoolAio *thread_pool_new_aio(struct AioContext *ctx);
+void thread_pool_free_aio(ThreadPoolAio *pool);
/*
   * thread_pool_submit_{aio,co} API: submit I/O requests in the thread's
@@ -36,7 +36,7 @@ void thread_pool_free(ThreadPool *pool);
  BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
                                     BlockCompletionFunc *cb, void *opaque);
  int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
-void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
+void thread_pool_update_params(ThreadPoolAio *pool, struct AioContext *ctx);

 #endif
diff --git a/util/async.c b/util/async.c
index 99db28389f66..f8b7678aefc8 100644
--- a/util/async.c
+++ b/util/async.c
@@ -369,7 +369,7 @@ aio_ctx_finalize(GSource     *source)
      QEMUBH *bh;
      unsigned flags;
- thread_pool_free(ctx->thread_pool);
+    thread_pool_free_aio(ctx->thread_pool);
#ifdef CONFIG_LINUX_AIO
      if (ctx->linux_aio) {
@@ -435,10 +435,10 @@ GSource *aio_get_g_source(AioContext *ctx)
      return &ctx->source;
  }
-ThreadPool *aio_get_thread_pool(AioContext *ctx)
+ThreadPoolAio *aio_get_thread_pool(AioContext *ctx)
  {
      if (!ctx->thread_pool) {
-        ctx->thread_pool = thread_pool_new(ctx);
+        ctx->thread_pool = thread_pool_new_aio(ctx);
      }
      return ctx->thread_pool;
  }
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 2f751d55b33f..908194dc070f 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -23,9 +23,9 @@
  #include "block/thread-pool.h"
  #include "qemu/main-loop.h"
-static void do_spawn_thread(ThreadPool *pool);
+static void do_spawn_thread(ThreadPoolAio *pool);
-typedef struct ThreadPoolElement ThreadPoolElement;
+typedef struct ThreadPoolElementAio ThreadPoolElementAio;
enum ThreadState {
      THREAD_QUEUED,
@@ -33,9 +33,9 @@ enum ThreadState {
      THREAD_DONE,
  };
-struct ThreadPoolElement {
+struct ThreadPoolElementAio {
      BlockAIOCB common;
-    ThreadPool *pool;
+    ThreadPoolAio *pool;
      ThreadPoolFunc *func;
      void *arg;
@@ -47,13 +47,13 @@ struct ThreadPoolElement {
      int ret;
/* Access to this list is protected by lock. */
-    QTAILQ_ENTRY(ThreadPoolElement) reqs;
+    QTAILQ_ENTRY(ThreadPoolElementAio) reqs;
/* This list is only written by the thread pool's mother thread. */
-    QLIST_ENTRY(ThreadPoolElement) all;
+    QLIST_ENTRY(ThreadPoolElementAio) all;
  };
-struct ThreadPool {
+struct ThreadPoolAio {
      AioContext *ctx;
      QEMUBH *completion_bh;
      QemuMutex lock;
@@ -62,10 +62,10 @@ struct ThreadPool {
      QEMUBH *new_thread_bh;
/* The following variables are only accessed from one AioContext. */
-    QLIST_HEAD(, ThreadPoolElement) head;
+    QLIST_HEAD(, ThreadPoolElementAio) head;
/* The following variables are protected by lock. */
-    QTAILQ_HEAD(, ThreadPoolElement) request_list;
+    QTAILQ_HEAD(, ThreadPoolElementAio) request_list;
      int cur_threads;
      int idle_threads;
      int new_threads;     /* backlog of threads we need to create */
@@ -76,14 +76,14 @@ struct ThreadPool {
static void *worker_thread(void *opaque)
  {
-    ThreadPool *pool = opaque;
+    ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock);
      pool->pending_threads--;
      do_spawn_thread(pool);
while (pool->cur_threads <= pool->max_threads) {
-        ThreadPoolElement *req;
+        ThreadPoolElementAio *req;
          int ret;
if (QTAILQ_EMPTY(&pool->request_list)) {
@@ -131,7 +131,7 @@ static void *worker_thread(void *opaque)
      return NULL;
  }
-static void do_spawn_thread(ThreadPool *pool)
+static void do_spawn_thread(ThreadPoolAio *pool)
  {
      QemuThread t;
@@ -148,14 +148,14 @@ static void do_spawn_thread(ThreadPool *pool)
 static void spawn_thread_bh_fn(void *opaque)
  {
-    ThreadPool *pool = opaque;
+    ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock);
      do_spawn_thread(pool);
      qemu_mutex_unlock(&pool->lock);
  }
-static void spawn_thread(ThreadPool *pool)
+static void spawn_thread(ThreadPoolAio *pool)
  {
      pool->cur_threads++;
      pool->new_threads++;
@@ -173,8 +173,8 @@ static void spawn_thread(ThreadPool *pool)
static void thread_pool_completion_bh(void *opaque)
  {
-    ThreadPool *pool = opaque;
-    ThreadPoolElement *elem, *next;
+    ThreadPoolAio *pool = opaque;
+    ThreadPoolElementAio *elem, *next;
 defer_call_begin(); /* cb() may use defer_call() to coalesce work */

@@ -184,8 +184,8 @@ restart:
              continue;
          }
- trace_thread_pool_complete(pool, elem, elem->common.opaque,
-                                   elem->ret);
+        trace_thread_pool_complete_aio(pool, elem, elem->common.opaque,
+                                       elem->ret);
          QLIST_REMOVE(elem, all);
if (elem->common.cb) {
@@ -217,10 +217,10 @@ restart:
static void thread_pool_cancel(BlockAIOCB *acb)
  {
-    ThreadPoolElement *elem = (ThreadPoolElement *)acb;
-    ThreadPool *pool = elem->pool;
+    ThreadPoolElementAio *elem = (ThreadPoolElementAio *)acb;
+    ThreadPoolAio *pool = elem->pool;
- trace_thread_pool_cancel(elem, elem->common.opaque);
+    trace_thread_pool_cancel_aio(elem, elem->common.opaque);
QEMU_LOCK_GUARD(&pool->lock);
      if (elem->state == THREAD_QUEUED) {
@@ -234,16 +234,16 @@ static void thread_pool_cancel(BlockAIOCB *acb)
  }
static const AIOCBInfo thread_pool_aiocb_info = {
-    .aiocb_size         = sizeof(ThreadPoolElement),
+    .aiocb_size         = sizeof(ThreadPoolElementAio),
      .cancel_async       = thread_pool_cancel,
  };
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
                                     BlockCompletionFunc *cb, void *opaque)
  {
-    ThreadPoolElement *req;
+    ThreadPoolElementAio *req;
      AioContext *ctx = qemu_get_current_aio_context();
-    ThreadPool *pool = aio_get_thread_pool(ctx);
+    ThreadPoolAio *pool = aio_get_thread_pool(ctx);
/* Assert that the thread submitting work is the same running the pool */
      assert(pool->ctx == qemu_get_current_aio_context());
@@ -290,7 +290,7 @@ int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
      return tpc.ret;
  }
-void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
+void thread_pool_update_params(ThreadPoolAio *pool, AioContext *ctx)
  {
      qemu_mutex_lock(&pool->lock);
@@ -317,7 +317,7 @@ void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
      qemu_mutex_unlock(&pool->lock);
  }
-static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
+static void thread_pool_init_one(ThreadPoolAio *pool, AioContext *ctx)
  {
      if (!ctx) {
          ctx = qemu_get_aio_context();
@@ -337,14 +337,14 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
      thread_pool_update_params(pool, ctx);
  }
-ThreadPool *thread_pool_new(AioContext *ctx)
+ThreadPoolAio *thread_pool_new_aio(AioContext *ctx)
  {
-    ThreadPool *pool = g_new(ThreadPool, 1);
+    ThreadPoolAio *pool = g_new(ThreadPoolAio, 1);
      thread_pool_init_one(pool, ctx);
      return pool;
  }
-void thread_pool_free(ThreadPool *pool)
+void thread_pool_free_aio(ThreadPoolAio *pool)
  {
      if (!pool) {
          return;
diff --git a/util/trace-events b/util/trace-events
index 5be12d7fab89..bd8f25fb5920 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -15,8 +15,8 @@ reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
# thread-pool.c
  thread_pool_submit_aio(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
-thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
-thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
+thread_pool_complete_aio(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
+thread_pool_cancel_aio(void *req, void *opaque) "req %p opaque %p"
# buffer.c
  buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"





reply via email to

[Prev in Thread] Current Thread [Next in Thread]