[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write
From: Maxim Levitsky
Subject: Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write
Date: Wed, 29 Jul 2020 20:35:46 +0300
User-agent: Evolution 3.36.3 (3.36.3-1.fc32)
On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
>
> Refactor the nvme_dma_{read,write}_prp functions into a common function
> taking a DMADirection parameter.
>
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> ---
> hw/block/nvme.c | 88 ++++++++++++++++++++++++-------------------------
> 1 file changed, 43 insertions(+), 45 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 6a1a1626b87b..d314a604db81 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -361,55 +361,50 @@ unmap:
> return status;
> }
>
> -static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> - uint64_t prp1, uint64_t prp2)
> +static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> + uint64_t prp1, uint64_t prp2, DMADirection dir)
> {
> QEMUSGList qsg;
> QEMUIOVector iov;
> uint16_t status = NVME_SUCCESS;
>
> - if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> - return NVME_INVALID_FIELD | NVME_DNR;
> + status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
> + if (status) {
> + return status;
> }
> +
> if (qsg.nsg > 0) {
> - if (dma_buf_write(ptr, len, &qsg)) {
> - status = NVME_INVALID_FIELD | NVME_DNR;
> + uint64_t residual;
> +
> + if (dir == DMA_DIRECTION_TO_DEVICE) {
> + residual = dma_buf_write(ptr, len, &qsg);
> + } else {
> + residual = dma_buf_read(ptr, len, &qsg);
> }
> - qemu_sglist_destroy(&qsg);
> - } else {
> - if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
> - status = NVME_INVALID_FIELD | NVME_DNR;
> - }
> - qemu_iovec_destroy(&iov);
> - }
> - return status;
> -}
>
> -static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> - uint64_t prp1, uint64_t prp2)
> -{
> - QEMUSGList qsg;
> - QEMUIOVector iov;
> - uint16_t status = NVME_SUCCESS;
> -
> - trace_pci_nvme_dma_read(prp1, prp2);
> -
> - if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> - return NVME_INVALID_FIELD | NVME_DNR;
> - }
> - if (qsg.nsg > 0) {
> - if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> + if (unlikely(residual)) {
> trace_pci_nvme_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> +
> qemu_sglist_destroy(&qsg);
> } else {
> - if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> + size_t bytes;
> +
> + if (dir == DMA_DIRECTION_TO_DEVICE) {
> + bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> + } else {
> + bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> + }
> +
> + if (unlikely(bytes != len)) {
> trace_pci_nvme_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> +
> qemu_iovec_destroy(&iov);
> }
> +
I know I reviewed this, but thinking about it now: why not add an assert here
that we don't have both the iov and the qsg populated with data?
Best regards,
Maxim Levitsky
> return status;
> }
>
> @@ -840,8 +835,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd
> *cmd, uint8_t rae,
> nvme_clear_events(n, NVME_AER_TYPE_SMART);
> }
>
> - return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
> - prp2);
> + return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
> @@ -862,8 +857,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd
> *cmd, uint32_t buf_len,
>
> trans_len = MIN(sizeof(fw_log) - off, buf_len);
>
> - return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
> - prp2);
> + return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> @@ -887,7 +882,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd
> *cmd, uint8_t rae,
>
> trans_len = MIN(sizeof(errlog) - off, buf_len);
>
> - return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1042,8 +1038,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n,
> NvmeIdentify *c)
>
> trace_pci_nvme_identify_ctrl();
>
> - return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
> - prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
> @@ -1062,8 +1058,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n,
> NvmeIdentify *c)
>
> ns = &n->namespaces[nsid - 1];
>
> - return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
> - prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
> @@ -1098,7 +1094,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n,
> NvmeIdentify *c)
> break;
> }
> }
> - ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
> + ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> g_free(list);
> return ret;
> }
> @@ -1139,7 +1136,8 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl
> *n, NvmeIdentify *c)
> ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
> stl_be_p(&ns_descrs->uuid.v, nsid);
>
> - return nvme_dma_read_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2);
> + return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -1220,8 +1218,8 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n,
> NvmeCmd *cmd)
>
> uint64_t timestamp = nvme_get_timestamp(n);
>
> - return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
> - sizeof(timestamp), prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1352,8 +1350,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n,
> NvmeCmd *cmd)
> uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
> uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>
> - ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
> - sizeof(timestamp), prp1, prp2);
> + ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> + prp2, DMA_DIRECTION_TO_DEVICE);
> if (ret != NVME_SUCCESS) {
> return ret;
> }
- Re: [PATCH 04/16] hw/block/nvme: remove redundant has_sg member, (continued)
- [PATCH 06/16] hw/block/nvme: pass request along for tracing, Klaus Jensen, 2020/07/20
- [PATCH 03/16] hw/block/nvme: replace dma_acct with blk_acct equivalent, Klaus Jensen, 2020/07/20
- [PATCH 08/16] hw/block/nvme: verify validity of prp lists in the cmb, Klaus Jensen, 2020/07/20
- [PATCH 05/16] hw/block/nvme: refactor dma read/write, Klaus Jensen, 2020/07/20
- [PATCH 10/16] hw/block/nvme: add check for mdts, Klaus Jensen, 2020/07/20
- [PATCH 07/16] hw/block/nvme: add request mapping helper, Klaus Jensen, 2020/07/20
- [PATCH 09/16] hw/block/nvme: refactor request bounds checking, Klaus Jensen, 2020/07/20