include/spdk/bdev.h +4 −15

@@ -189,17 +189,11 @@ struct spdk_bdev_io {
         /** The unaligned rbuf originally allocated. */
         void *buf_unaligned;

-        /** For basic read case, use our own iovec element. */
-        struct iovec iov;
-
-        /** For SG buffer cases, array of iovecs to transfer. */
-        struct iovec *iovs;
+        /** For single buffer cases, pointer to the aligned data buffer. */
+        void *buf;

-        /** For SG buffer cases, number of iovecs in iovec array. */
-        int iovcnt;
-
-        /** For SG buffer cases, total size of data to be transferred. */
-        size_t len;
+        /** For single buffer cases, size of the data buffer. */
+        uint64_t nbytes;

         /** Starting offset (in bytes) of the blockdev for this I/O. */
         uint64_t offset;

@@ -285,11 +279,6 @@ bool spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type
 struct spdk_bdev_io *spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
                                     void *buf, uint64_t offset, uint64_t nbytes,
                                     spdk_bdev_io_completion_cb cb, void *cb_arg);
-struct spdk_bdev_io *
-spdk_bdev_readv(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
-                struct iovec *iov, int iovcnt,
-                uint64_t offset, uint64_t nbytes,
-                spdk_bdev_io_completion_cb cb, void *cb_arg);
 struct spdk_bdev_io *spdk_bdev_write(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
                                      void *buf, uint64_t offset, uint64_t nbytes,
                                      spdk_bdev_io_completion_cb cb, void *cb_arg);

lib/bdev/aio/blockdev_aio.c +14 −16

@@ -98,9 +98,8 @@ blockdev_aio_close(struct file_disk *disk)
 }

 static int64_t
-blockdev_aio_readv(struct file_disk *fdisk, struct spdk_io_channel *ch,
-                   struct blockdev_aio_task *aio_task,
-                   struct iovec *iov, int iovcnt, uint64_t nbytes, uint64_t offset)
+blockdev_aio_read(struct file_disk *fdisk, struct spdk_io_channel *ch,
+                  struct blockdev_aio_task *aio_task,
+                  void *buf, uint64_t nbytes, uint64_t offset)
 {
     struct iocb *iocb = &aio_task->iocb;
     struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);

@@ -108,15 +107,15 @@ blockdev_aio_readv(struct file_disk *fdisk, struct spdk_io_channel *ch,
     iocb->aio_fildes = fdisk->fd;
     iocb->aio_reqprio = 0;
-    iocb->aio_lio_opcode = IO_CMD_PREADV;
-    iocb->u.v.vec = iov;
-    iocb->u.v.nr = iovcnt;
-    iocb->u.v.offset = offset;
+    iocb->aio_lio_opcode = IO_CMD_PREAD;
+    iocb->u.c.buf = buf;
+    iocb->u.c.nbytes = nbytes;
+    iocb->u.c.offset = offset;
     iocb->data = aio_task;
     aio_task->len = nbytes;

-    SPDK_TRACELOG(SPDK_TRACE_AIO, "read %d iovs size %lu to off: %#lx\n",
-                  iovcnt, nbytes, offset);
+    SPDK_TRACELOG(SPDK_TRACE_AIO, "read from %p of size %lu to off: %#lx\n",
+                  buf, nbytes, offset);

     rc = io_submit(aio_ch->io_ctx, 1, &iocb);
     if (rc < 0) {

@@ -246,13 +245,12 @@ static void
 blockdev_aio_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
 {
     int ret = 0;

-    ret = blockdev_aio_readv((struct file_disk *)bdev_io->ctx,
-                             bdev_io->ch,
-                             (struct blockdev_aio_task *)bdev_io->driver_ctx,
-                             bdev_io->u.read.iovs,
-                             bdev_io->u.read.iovcnt,
-                             bdev_io->u.read.offset,
-                             bdev_io->u.read.len);
+    ret = blockdev_aio_read((struct file_disk *)bdev_io->ctx,
+                            bdev_io->ch,
+                            (struct blockdev_aio_task *)bdev_io->driver_ctx,
+                            bdev_io->u.read.buf,
+                            bdev_io->u.read.nbytes,
+                            bdev_io->u.read.offset);

     if (ret < 0) {
         spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
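For context on the aio change: blockdev_aio_read now fills a single-buffer IO_CMD_PREAD iocb where the readv variant filled the vectored u.v fields. Below is a minimal standalone libaio sketch of the same single-buffer submission pattern, using the io_prep_pread helper instead of assigning the fields by hand; the file name, buffer size, and offset are hypothetical placeholders, error handling is trimmed, and it is not SPDK code. Link with -laio.

#define _GNU_SOURCE             /* for O_DIRECT */
#include <libaio.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    io_context_t ctx = 0;
    struct iocb iocb, *iocbs[1] = { &iocb };
    struct io_event event;
    void *buf;
    int fd;

    fd = open("testfile", O_RDONLY | O_DIRECT);
    if (fd < 0 || io_setup(1, &ctx) != 0) {
        return 1;
    }

    /* O_DIRECT requires sector-aligned memory, hence posix_memalign. */
    if (posix_memalign(&buf, 512, 4096) != 0) {
        return 1;
    }

    /* Equivalent to setting aio_lio_opcode = IO_CMD_PREAD and the
     * u.c.buf / u.c.nbytes / u.c.offset fields directly, as the
     * driver code above does. */
    io_prep_pread(&iocb, fd, buf, 4096, 0);

    if (io_submit(ctx, 1, iocbs) != 1) {
        return 1;
    }

    /* Block until the single outstanding I/O completes. */
    if (io_getevents(ctx, 1, 1, &event, NULL) == 1) {
        printf("read %ld bytes\n", (long)event.res);
    }

    io_destroy(ctx);
    free(buf);
    close(fd);
    return 0;
}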
lib/bdev/bdev.c +6 −65

@@ -113,11 +113,8 @@ spdk_bdev_io_set_rbuf(struct spdk_bdev_io *bdev_io, void *buf)
 {
     assert(bdev_io->get_rbuf_cb != NULL);
     assert(buf != NULL);
-    assert(bdev_io->u.read.iovs != NULL);

     bdev_io->u.read.buf_unaligned = buf;
-    bdev_io->u.read.iovs[0].iov_base = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
-    bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
+    bdev_io->u.read.buf = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
     bdev_io->u.read.put_rbuf = true;
     bdev_io->get_rbuf_cb(bdev_io);
 }

@@ -131,9 +128,7 @@ spdk_bdev_io_put_rbuf(struct spdk_bdev_io *bdev_io)
     need_rbuf_tailq_t *tailq;
     uint64_t length;

-    assert(bdev_io->u.read.iovcnt == 1);
-
-    length = bdev_io->u.read.len;
+    length = bdev_io->u.read.nbytes;
     buf = bdev_io->u.read.buf_unaligned;

     if (length <= SPDK_BDEV_SMALL_RBUF_MAX_SIZE) {

@@ -366,7 +361,7 @@ spdk_bdev_put_io(struct spdk_bdev_io *bdev_io)
 static void
 _spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io)
 {
-    uint64_t len = bdev_io->u.read.len;
+    uint64_t len = bdev_io->u.read.nbytes;
     struct rte_mempool *pool;
     need_rbuf_tailq_t *tailq;
     int rc;

@@ -549,62 +544,9 @@ spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
     bdev_io->ch = ch;
     bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
-    bdev_io->u.read.iov.iov_base = buf;
-    bdev_io->u.read.iov.iov_len = nbytes;
-    bdev_io->u.read.iovs = &bdev_io->u.read.iov;
-    bdev_io->u.read.iovcnt = 1;
-    bdev_io->u.read.len = nbytes;
-    bdev_io->u.read.offset = offset;
-    bdev_io->u.read.put_rbuf = false;
-    spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
-
-    rc = spdk_bdev_io_submit(bdev_io);
-    if (rc < 0) {
-        spdk_bdev_put_io(bdev_io);
-        return NULL;
-    }
-
-    return bdev_io;
-}
-
-struct spdk_bdev_io *
-spdk_bdev_readv(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
-                struct iovec *iov, int iovcnt,
-                uint64_t offset, uint64_t nbytes,
-                spdk_bdev_io_completion_cb cb, void *cb_arg)
-{
-    struct spdk_bdev_io *bdev_io;
-    int rc;
-
-    /* Return failure if nbytes is not a multiple of bdev->blocklen */
-    if (nbytes % bdev->blocklen) {
-        return NULL;
-    }
-
-    /* Return failure if offset + nbytes is less than offset; indicates there
-     * has been an overflow and hence the offset has been wrapped around */
-    if ((offset + nbytes) < offset) {
-        return NULL;
-    }
-
-    /* Return failure if offset + nbytes exceeds the size of the blockdev */
-    if ((offset + nbytes) > (bdev->blockcnt * bdev->blocklen)) {
-        return NULL;
-    }
-
-    bdev_io = spdk_bdev_get_io();
-    if (!bdev_io) {
-        SPDK_ERRLOG("spdk_bdev_io memory allocation failed duing read\n");
-        return NULL;
-    }
-
-    bdev_io->ch = ch;
-    bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
-    bdev_io->u.read.iovs = iov;
-    bdev_io->u.read.iovcnt = iovcnt;
-    bdev_io->u.read.len = nbytes;
+    bdev_io->u.read.buf = buf;
+    bdev_io->u.read.nbytes = nbytes;
     bdev_io->u.read.offset = offset;
     bdev_io->u.read.put_rbuf = false;
     spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);

     rc = spdk_bdev_io_submit(bdev_io);

@@ -891,9 +833,8 @@ void
 spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_rbuf_cb cb)
 {
     assert(cb != NULL);
-    assert(bdev_io->u.read.iovs != NULL);

-    if (bdev_io->u.read.iovs[0].iov_base == NULL) {
+    if (bdev_io->u.read.buf == NULL) {
         bdev_io->get_rbuf_cb = cb;
         _spdk_bdev_io_get_rbuf(bdev_io);
     } else {
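On the rbuf path: spdk_bdev_io_set_rbuf derives the aligned buffer with (buf + 512) & ~511UL. Here is a self-contained illustration of that pointer arithmetic, not taken from SPDK. Note that it differs from the conventional round-up (p + 511) & ~511 in that an already 512-aligned pointer still advances by a full 512 bytes, so the pool buffers presumably carry at least 512 bytes of slack.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Stand-in for an unaligned rbuf drawn from the mempool; oversized
     * so the aligned region still fits after the adjustment. */
    char *raw = malloc(4096 + 512);
    char *aligned;

    if (raw == NULL) {
        return 1;
    }

    /* Same expression as spdk_bdev_io_set_rbuf: step past the next
     * 512-byte boundary, then clear the low nine bits. */
    aligned = (char *)((unsigned long)(raw + 512) & ~511UL);

    /* The adjustment is always in (0, 512]: never zero, at most 512. */
    printf("raw=%p aligned=%p adjustment=%ld\n",
           (void *)raw, (void *)aligned, (long)(aligned - raw));

    free(raw);
    return 0;
}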
lib/bdev/malloc/blockdev_malloc.c +21 −96

@@ -70,11 +70,6 @@ malloc_done(void *ref, int status)
     spdk_bdev_io_complete(spdk_bdev_io_from_ctx(cp_task), bdev_status);
 }

-static void
-malloc_iov_done_nop(void *ref, int status)
-{
-}
-
 static struct malloc_disk *g_malloc_disk_head = NULL;

 int malloc_disk_count = 0;

@@ -125,58 +120,16 @@ blockdev_malloc_destruct(struct spdk_bdev *bdev)
     return 0;
 }

-static int
-blockdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
-{
-    int i;
-
-    for (i = 0; nbytes && i < iovcnt; i++) {
-        if (nbytes < iovs[i].iov_len)
-            break;
-
-        nbytes -= iovs[i].iov_len;
-    }
-
-    /* IO len must be equal to total len of all buffers */
-    return nbytes != 0 || iovcnt != i;
-}
-
 static int64_t
-blockdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
-                      struct copy_task *copy_req,
-                      struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
+blockdev_malloc_read(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
+                     struct copy_task *copy_req,
+                     void *buf, uint64_t nbytes, uint64_t offset)
 {
-    int64_t res = 0;
-    copy_completion_cb completion_cb = malloc_iov_done_nop;
-    void *src = mdisk->malloc_buf + offset;
-    int i;
-
-    if (blockdev_malloc_check_iov_len(iov, iovcnt, len))
-        return -1;
-
-    SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx\n",
-                  len, offset);
-
-    for (i = 0; i < iovcnt; i++) {
-        /*
-         * The copy engine will complete all copy operations in order, so
-         * use a nop callback for the all iov completions before the last one.
-         * Then when the last iov is completed, we will actually complete the
-         * bdev operation back to the caller.
-         */
-        if (len == iov[i].iov_len)
-            completion_cb = malloc_done;
-
-        res = spdk_copy_submit(copy_req, ch, iov[i].iov_base, src,
-                               iov[i].iov_len, completion_cb);
-
-        if (res)
-            break;
-
-        src += iov[i].iov_len;
-        len -= iov[i].iov_len;
-    }
-
-    return res;
+    SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx to %p\n",
+                  nbytes, offset, buf);
+
+    return spdk_copy_submit(copy_req, ch, buf, mdisk->malloc_buf + offset,
+                            nbytes, malloc_done);
 }
@@ -184,37 +137,14 @@ blockdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
                        struct copy_task *copy_req,
                        struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
 {
-    int64_t res = 0;
-    copy_completion_cb completion_cb = malloc_iov_done_nop;
-    void *dst = mdisk->malloc_buf + offset;
-    int i;
-
-    if (blockdev_malloc_check_iov_len(iov, iovcnt, len))
+    if ((iovcnt != 1) || (iov->iov_len != len))
         return -1;

-    SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx\n",
-                  len, offset);
-
-    for (i = 0; i < iovcnt; i++) {
-        /*
-         * The copy engine will complete all copy operations in order, so
-         * use a nop callback for the all iov completions before the last one.
-         * Then when the last iov is completed, we will actually complete the
-         * bdev operation back to the caller.
-         */
-        if (len == iov[i].iov_len)
-            completion_cb = malloc_done;
-
-        res = spdk_copy_submit(copy_req, ch, dst, iov[i].iov_base,
-                               iov[i].iov_len, completion_cb);
-
-        if (res)
-            break;
-
-        dst += iov[i].iov_len;
-        len -= iov[i].iov_len;
-    }
+    SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx from %p\n",
+                  iov->iov_len, offset, iov->iov_base);

-    return res;
+    return spdk_copy_submit(copy_req, ch, mdisk->malloc_buf + offset,
+                            iov->iov_base, len, malloc_done);
 }

 static int

@@ -268,24 +198,19 @@ _blockdev_malloc_submit_request(struct spdk_bdev_io *bdev_io)
 {
     switch (bdev_io->type) {
     case SPDK_BDEV_IO_TYPE_READ:
-        if (bdev_io->u.read.iovs[0].iov_base == NULL) {
-            assert(bdev_io->u.read.iovcnt == 1);
-            bdev_io->u.read.iovs[0].iov_base = ((struct malloc_disk *)bdev_io->ctx)->malloc_buf +
-                                               bdev_io->u.read.offset;
-            bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
+        if (bdev_io->u.read.buf == NULL) {
+            bdev_io->u.read.buf = ((struct malloc_disk *)bdev_io->ctx)->malloc_buf +
+                                  bdev_io->u.read.offset;
             bdev_io->u.read.put_rbuf = false;
             spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bdev_io->driver_ctx),
                                   SPDK_BDEV_IO_STATUS_SUCCESS);
             return 0;
         }

-        return blockdev_malloc_readv((struct malloc_disk *)bdev_io->ctx,
-                                     bdev_io->ch,
-                                     (struct copy_task *)bdev_io->driver_ctx,
-                                     bdev_io->u.read.iovs,
-                                     bdev_io->u.read.iovcnt,
-                                     bdev_io->u.read.len,
-                                     bdev_io->u.read.offset);
+        return blockdev_malloc_read((struct malloc_disk *)bdev_io->ctx,
+                                    bdev_io->ch,
+                                    (struct copy_task *)bdev_io->driver_ctx,
+                                    bdev_io->u.read.buf,
+                                    bdev_io->u.read.nbytes,
+                                    bdev_io->u.read.offset);

     case SPDK_BDEV_IO_TYPE_WRITE:
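On the malloc change: the deleted blockdev_malloc_check_iov_len enforced that the iovec lengths sum exactly to the I/O size, which the single-buffer path reduces to the inline (iovcnt != 1) || (iov->iov_len != len) test kept in the writev stub. For reference, a standalone copy of the removed validation with a small self-test; the buffer sizes are arbitrary and the test harness is not part of the original code.

#include <assert.h>
#include <stddef.h>
#include <sys/uio.h>

/* Returns 0 when nbytes equals the total length of all iovec elements,
 * nonzero otherwise - the same contract as the deleted helper. */
static int check_iov_len(const struct iovec *iovs, int iovcnt, size_t nbytes)
{
    int i;

    for (i = 0; nbytes && i < iovcnt; i++) {
        if (nbytes < iovs[i].iov_len)
            break;          /* this element overshoots the remaining I/O */

        nbytes -= iovs[i].iov_len;
    }

    /* Mismatch if bytes are left over or elements went unconsumed. */
    return nbytes != 0 || iovcnt != i;
}

int main(void)
{
    char a[512], b[512];
    struct iovec exact[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct iovec short_iov[2] = { { a, sizeof(a) }, { b, 100 } };

    assert(check_iov_len(exact, 2, 1024) == 0);     /* 512 + 512 == 1024 */
    assert(check_iov_len(short_iov, 2, 1024) != 0); /* 512 + 100 != 1024 */
    return 0;
}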
lib/bdev/nvme/blockdev_nvme.c +29 −87

@@ -89,17 +89,7 @@ struct nvme_io_channel {
 #define NVME_DEFAULT_MAX_UNMAP_BDESC_COUNT 1

 struct nvme_blockio {
-    /** array of iovecs to transfer. */
-    struct iovec *iovs;
-
-    /** Number of iovecs in iovs array. */
-    int iovcnt;
-
-    /** Current iovec position. */
-    int iovpos;
-
-    /** Offset in current iovec. */
-    uint32_t iov_offset;
+    int reserved;
 };

 enum data_direction {

@@ -131,10 +121,9 @@ static void nvme_ctrlr_initialize_blockdevs(struct spdk_nvme_ctrlr *ctrlr,
         int bdev_per_ns, int ctrlr_id);
 static int nvme_library_init(void);
 static void nvme_library_fini(void);
-static int nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
-                          struct nvme_blockio *bio,
-                          int direction, struct iovec *iov, int iovcnt, uint64_t nbytes,
-                          uint64_t offset);
+int nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
+                   struct nvme_blockio *bio,
+                   int direction, void *buf, uint64_t nbytes, uint64_t offset);

 static int
 nvme_get_ctx_size(void)

@@ -146,18 +135,17 @@ SPDK_BDEV_MODULE_REGISTER(nvme_library_init, NULL, blockdev_nvme_get_spdk_runnin
                           nvme_get_ctx_size)

 static int64_t
-blockdev_nvme_readv(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
-                    struct nvme_blockio *bio,
-                    struct iovec *iov, int iovcnt, uint64_t nbytes, uint64_t offset)
+blockdev_nvme_read(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
+                   struct nvme_blockio *bio,
+                   void *buf, uint64_t nbytes, uint64_t offset)
 {
     struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
     int64_t rc;

-    SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "read %lu bytes with offset %#lx\n",
-                  nbytes, offset);
+    SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "read %lu bytes with offset %#lx to %p\n",
+                  nbytes, offset, buf);

-    rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_READ,
-                        iov, iovcnt, nbytes, offset);
+    rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_READ,
+                        buf, nbytes, offset);
     if (rc < 0)
         return -1;

@@ -172,15 +160,18 @@ blockdev_nvme_writev(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
     struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
     int64_t rc;

-    SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "write %lu bytes with offset %#lx\n",
-                  len, offset);
+    if ((iovcnt != 1) || (iov->iov_len != len))
+        return -1;

-    rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_WRITE,
-                        iov, iovcnt, len, offset);
+    SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "write %lu bytes with offset %#lx from %p\n",
+                  iov->iov_len, offset, iov->iov_base);
+
+    rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_WRITE,
+                        (void *)iov->iov_base, iov->iov_len, offset);
     if (rc < 0)
         return -1;

-    return len;
+    return iov->iov_len;
 }

 static void

@@ -232,12 +223,11 @@ blockdev_nvme_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
 {
     int ret;

-    ret = blockdev_nvme_readv((struct nvme_blockdev *)bdev_io->ctx,
-                              bdev_io->ch,
-                              (struct nvme_blockio *)bdev_io->driver_ctx,
-                              bdev_io->u.read.iovs,
-                              bdev_io->u.read.iovcnt,
-                              bdev_io->u.read.len,
-                              bdev_io->u.read.offset);
+    ret = blockdev_nvme_read((struct nvme_blockdev *)bdev_io->ctx,
+                             bdev_io->ch,
+                             (struct nvme_blockio *)bdev_io->driver_ctx,
+                             bdev_io->u.read.buf,
+                             bdev_io->u.read.nbytes,
+                             bdev_io->u.read.offset);

     if (ret < 0) {
@@ -618,51 +608,10 @@ queued_done(void *ref, const struct spdk_nvme_cpl *cpl)
     spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), status);
 }

-static void
-queued_reset_sgl(void *ref, uint32_t sgl_offset)
-{
-    struct nvme_blockio *bio = ref;
-    struct iovec *iov;
-
-    bio->iov_offset = sgl_offset;
-    for (bio->iovpos = 0; bio->iovpos < bio->iovcnt; bio->iovpos++) {
-        iov = &bio->iovs[bio->iovpos];
-        if (bio->iov_offset < iov->iov_len)
-            break;
-
-        bio->iov_offset -= iov->iov_len;
-    }
-}
-
-static int
-queued_next_sge(void *ref, uint64_t *address, uint32_t *length)
-{
-    struct nvme_blockio *bio = ref;
-    struct iovec *iov;
-
-    assert(bio->iovpos < bio->iovcnt);
-
-    iov = &bio->iovs[bio->iovpos];
-    bio->iovpos++;
-
-    *address = spdk_vtophys(iov->iov_base);
-    *length = iov->iov_len;
-
-    if (bio->iov_offset) {
-        assert(bio->iov_offset <= iov->iov_len);
-        *address += bio->iov_offset;
-        *length -= bio->iov_offset;
-        bio->iov_offset = 0;
-    }
-
-    return 0;
-}
-
 int
 nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
                struct nvme_blockio *bio,
-               int direction, struct iovec *iov, int iovcnt, uint64_t nbytes,
-               uint64_t offset)
+               int direction, void *buf, uint64_t nbytes, uint64_t offset)
 {
     uint32_t ss = spdk_nvme_ns_get_sector_size(bdev->ns);
     uint32_t lba_count;

@@ -678,19 +627,12 @@ nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
     lba_count = nbytes / ss;

-    bio->iovs = iov;
-    bio->iovcnt = iovcnt;
-    bio->iovpos = 0;
-    bio->iov_offset = 0;
-
     if (direction == BDEV_DISK_READ) {
-        rc = spdk_nvme_ns_cmd_readv(bdev->ns, qpair, next_lba,
-                                    lba_count, queued_done, bio, 0,
-                                    queued_reset_sgl, queued_next_sge);
+        rc = spdk_nvme_ns_cmd_read(bdev->ns, qpair, buf, next_lba,
+                                   lba_count, queued_done, bio, 0);
     } else {
-        rc = spdk_nvme_ns_cmd_writev(bdev->ns, qpair, next_lba,
-                                     lba_count, queued_done, bio, 0,
-                                     queued_reset_sgl, queued_next_sge);
+        rc = spdk_nvme_ns_cmd_write(bdev->ns, qpair, buf, next_lba,
+                                    lba_count, queued_done, bio, 0);
     }

     if (rc != 0) {
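Finally, on the nvme change: the deleted queued_reset_sgl/queued_next_sge pair was the scatter-gather cursor driven by spdk_nvme_ns_cmd_readv/writev - reset positions an (iovec index, intra-iovec offset) cursor at a requested byte offset, and next_sge yields one element per call. Below is a self-contained model of that cursor, not the driver code itself: plain virtual addresses stand in for the spdk_vtophys() translation the original performed, and the main() test values are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

struct sgl_cursor {
    struct iovec *iovs;
    int iovcnt;
    int iovpos;             /* index of the current iovec */
    uint32_t iov_offset;    /* byte offset into the current iovec */
};

static void reset_sgl(struct sgl_cursor *c, uint32_t sgl_offset)
{
    /* Skip whole iovecs until the byte offset falls inside one. */
    c->iov_offset = sgl_offset;
    for (c->iovpos = 0; c->iovpos < c->iovcnt; c->iovpos++) {
        if (c->iov_offset < c->iovs[c->iovpos].iov_len)
            break;

        c->iov_offset -= c->iovs[c->iovpos].iov_len;
    }
}

static void next_sge(struct sgl_cursor *c, uint64_t *address, uint32_t *length)
{
    struct iovec *iov;

    assert(c->iovpos < c->iovcnt);
    iov = &c->iovs[c->iovpos++];

    *address = (uint64_t)(uintptr_t)iov->iov_base;
    *length = iov->iov_len;

    /* A nonzero offset only trims the first element after a reset. */
    if (c->iov_offset) {
        assert(c->iov_offset <= iov->iov_len);
        *address += c->iov_offset;
        *length -= c->iov_offset;
        c->iov_offset = 0;
    }
}

int main(void)
{
    char a[512], b[1024];
    struct iovec iovs[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct sgl_cursor c = { iovs, 2, 0, 0 };
    uint64_t addr;
    uint32_t len;

    reset_sgl(&c, 600);     /* 600 = 512 + 88: starts 88 bytes into b */
    next_sge(&c, &addr, &len);
    printf("sge: addr=%#lx len=%u\n", (unsigned long)addr, len); /* len == 936 */
    return 0;
}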