[PULL 4/7] block-backend: Reorder flush/pdiscard function definitions
From: Kevin Wolf
Subject: [PULL 4/7] block-backend: Reorder flush/pdiscard function definitions
Date: Tue, 7 Apr 2020 16:26:13 +0200
Move all variants of the flush/pdiscard functions to a single place and
put the blk_co_*() version first because it is called by all other
variants (and will become static in the next patch).
Signed-off-by: Kevin Wolf <address@hidden>
Reviewed-by: Vladimir Sementsov-Ogievskiy <address@hidden>
Reviewed-by: Max Reitz <address@hidden>
Message-Id: <address@hidden>
Signed-off-by: Kevin Wolf <address@hidden>
---
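A minimal stand-alone sketch of the layering described above (illustrative only:
the types and signatures below are simplified stand-ins, not the real QEMU
BlockBackend/BlockAIOCB/coroutine machinery; just the names and the call
structure mirror the patch). The blk_co_*() function does the real work, and
the AIO and synchronous entry points are thin wrappers around it:

/* Toy sketch only: stand-in types, no coroutines, no real I/O. */
#include <stdio.h>

typedef struct BlockBackend { const char *name; } BlockBackend;
typedef void BlockCompletionFunc(void *opaque, int ret);

/* Core implementation: every other variant funnels into this one
 * (the real blk_co_pdiscard() is a coroutine function and becomes
 * static in the next patch). */
static int blk_co_pdiscard(BlockBackend *blk, long offset, int bytes)
{
    printf("%s: discard %d bytes at offset %ld\n", blk->name, bytes, offset);
    return 0;
}

/* AIO-style wrapper: the real one runs the core in a coroutine and
 * reports the result through the completion callback. */
static void blk_aio_pdiscard(BlockBackend *blk, long offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    cb(opaque, blk_co_pdiscard(blk, offset, bytes));
}

/* Synchronous wrapper: run the core and return its result directly. */
static int blk_pdiscard(BlockBackend *blk, long offset, int bytes)
{
    return blk_co_pdiscard(blk, offset, bytes);
}

static void discard_done(void *opaque, int ret)
{
    (void)opaque;
    printf("aio discard completed: %d\n", ret);
}

int main(void)
{
    BlockBackend blk = { "drive0" };

    blk_aio_pdiscard(&blk, 0, 512, discard_done, NULL);
    return blk_pdiscard(&blk, 512, 512);
}

In the real code the AIO wrapper dispatches through blk_aio_prwv() and the
synchronous wrapper waits through blk_prw(), as the hunks below show.
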
block/block-backend.c | 92 +++++++++++++++++++++----------------------
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 8b8f2a80a0..17b2e87afa 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1488,38 +1488,6 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                         blk_aio_write_entry, flags, cb, opaque);
 }
 
-static void blk_aio_flush_entry(void *opaque)
-{
-    BlkAioEmAIOCB *acb = opaque;
-    BlkRwCo *rwco = &acb->rwco;
-
-    rwco->ret = blk_co_flush(rwco->blk);
-    blk_aio_complete(acb);
-}
-
-BlockAIOCB *blk_aio_flush(BlockBackend *blk,
-                          BlockCompletionFunc *cb, void *opaque)
-{
-    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
-}
-
-static void blk_aio_pdiscard_entry(void *opaque)
-{
-    BlkAioEmAIOCB *acb = opaque;
-    BlkRwCo *rwco = &acb->rwco;
-
-    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
-    blk_aio_complete(acb);
-}
-
-BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
-                             int64_t offset, int bytes,
-                             BlockCompletionFunc *cb, void *opaque)
-{
-    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
-                        cb, opaque);
-}
-
 void blk_aio_cancel(BlockAIOCB *acb)
 {
     bdrv_aio_cancel(acb);
@@ -1586,6 +1554,37 @@ int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
     return bdrv_co_pdiscard(blk->root, offset, bytes);
 }
 
+static void blk_aio_pdiscard_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
+    blk_aio_complete(acb);
+}
+
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
+                             int64_t offset, int bytes,
+                             BlockCompletionFunc *cb, void *opaque)
+{
+    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
+                        cb, opaque);
+}
+
+static void blk_pdiscard_entry(void *opaque)
+{
+    BlkRwCo *rwco = opaque;
+    QEMUIOVector *qiov = rwco->iobuf;
+
+    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
+    aio_wait_kick();
+}
+
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
+{
+    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
+}
+
 int blk_co_flush(BlockBackend *blk)
 {
     blk_wait_while_drained(blk);
@@ -1597,6 +1596,21 @@ int blk_co_flush(BlockBackend *blk)
     return bdrv_co_flush(blk_bs(blk));
 }
 
+static void blk_aio_flush_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_flush(rwco->blk);
+    blk_aio_complete(acb);
+}
+
+BlockAIOCB *blk_aio_flush(BlockBackend *blk,
+                          BlockCompletionFunc *cb, void *opaque)
+{
+    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
+}
+
 static void blk_flush_entry(void *opaque)
 {
     BlkRwCo *rwco = opaque;
@@ -2083,20 +2097,6 @@ int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
     return bdrv_truncate(blk->root, offset, exact, prealloc, errp);
 }
 
-static void blk_pdiscard_entry(void *opaque)
-{
-    BlkRwCo *rwco = opaque;
-    QEMUIOVector *qiov = rwco->iobuf;
-
-    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
-    aio_wait_kick();
-}
-
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
-{
-    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
-}
-
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                      int64_t pos, int size)
 {
--
2.20.1