Commit 13ed9986 authored by Jim Harris, committed by Changpeng Liu
Browse files

nvme: make basic request functions static inline



This reduces submission+completion time by 10-15
core clocks per IO on an Intel Xeon Platinum
processor.  Similar improvements should be seen
on other processors as well.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I3241ba53ef5f21a8eef930b523a951525922e6b8

Reviewed-on: https://review.gerrithub.io/413284


Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
parent 9c4679bc
Loading
Loading
Loading
Loading
+1 −66
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@
#define SPDK_NVME_DRIVER_NAME "spdk_nvme_driver"

struct nvme_driver	*g_spdk_nvme_driver;
static pid_t g_spdk_nvme_pid;
pid_t			g_spdk_nvme_pid;

int32_t			spdk_nvme_retry_count;

@@ -142,61 +142,6 @@ spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

/*
 * Allocate a request from the qpair's free list and initialize the
 * fields needed for submission.  Returns NULL if the free list is
 * empty.  The request is returned to the free list by
 * nvme_free_request().
 */
struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = STAILQ_FIRST(&qpair->free_req);
	if (req == NULL) {
		return req;
	}

	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);

	/*
	 * Only memset/zero fields that need it.  All other fields
	 *  will be initialized appropriately either later in this
	 *  function, or before they are needed later in the
	 *  submission path.  For example, the children
	 *  TAILQ_ENTRY and following members are
	 *  only used as part of I/O splitting so we avoid
	 *  memsetting them until it is actually needed.
	 *  They will be initialized in nvme_request_add_child()
	 *  if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, payload_size));

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->payload = *payload;
	req->payload_size = payload_size;
	req->qpair = qpair;
	req->pid = g_spdk_nvme_pid;

	return req;
}

/*
 * Wrap a contiguous buffer in an nvme_payload and delegate to
 * nvme_allocate_request().
 */
struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
			     void *buffer, uint32_t payload_size,
			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload contig_payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	return nvme_allocate_request(qpair, &contig_payload, payload_size, cb_fn, cb_arg);
}

/* Allocate a request that carries no data payload (NULL buffer, size 0). */
struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
@@ -261,16 +206,6 @@ nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
	return req;
}

/*
 * Return a completed request to its owning qpair's free list.  The
 * request must have no outstanding child requests.
 */
void
nvme_free_request(struct nvme_request *req)
{
	assert(req != NULL);
	assert(req->num_children == 0);
	assert(req->qpair != NULL);

	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}

int
nvme_robust_mutex_init_shared(pthread_mutex_t *mtx)
{
+76 −10
Original line number Diff line number Diff line
@@ -55,6 +55,8 @@
#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

extern pid_t g_spdk_nvme_pid;

/*
 * Some Intel devices support vendor-unique read latency log page even
 * though the log page directory says otherwise.
@@ -638,19 +640,83 @@ int nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
			  struct spdk_nvme_ctrlr *ctrlr);
void	nvme_ns_destruct(struct spdk_nvme_ns *ns);

struct nvme_request *nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_null(struct spdk_nvme_qpair *qpair,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
/*
 * Allocate a request from the qpair's free list and initialize the
 * fields needed for submission.  Defined static inline in the header
 * so the submission/completion hot path avoids a function call.
 * Returns NULL if the free list is empty.
 */
static inline struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = STAILQ_FIRST(&qpair->free_req);
	if (req == NULL) {
		return req;
	}

	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);

	/*
	 * Only memset/zero fields that need it.  All other fields
	 *  will be initialized appropriately either later in this
	 *  function, or before they are needed later in the
	 *  submission path.  For example, the children
	 *  TAILQ_ENTRY and following members are
	 *  only used as part of I/O splitting so we avoid
	 *  memsetting them until it is actually needed.
	 *  They will be initialized in nvme_request_add_child()
	 *  if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, payload_size));

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->payload = *payload;
	req->payload_size = payload_size;
	req->qpair = qpair;
	req->pid = g_spdk_nvme_pid;

	return req;
}

/*
 * Build a contiguous-buffer payload and allocate a request for it via
 * nvme_allocate_request().
 */
static inline struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
			     void *buffer, uint32_t payload_size,
			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload contig_payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	return nvme_allocate_request(qpair, &contig_payload, payload_size, cb_fn, cb_arg);
}

/* Allocate a request with no data payload (NULL buffer, zero length). */
static inline struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
		void *buffer, uint32_t payload_size,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
void	nvme_complete_request(struct nvme_request *req, struct spdk_nvme_cpl *cpl);
void	nvme_free_request(struct nvme_request *req);

/*
 * Complete a request by invoking its callback (if one was registered)
 * with the given completion queue entry.
 */
static inline void
nvme_complete_request(struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	if (req->cb_fn) {
		req->cb_fn(req->cb_arg, cpl);
	}
}

/*
 * Return a request to its qpair's free list.  The request must have
 * no outstanding child requests.
 */
static inline void
nvme_free_request(struct nvme_request *req)
{
	assert(req != NULL);
	assert(req->num_children == 0);
	assert(req->qpair != NULL);

	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}

void	nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child);
uint64_t nvme_get_quirks(const struct spdk_pci_id *id);

+0 −8
Original line number Diff line number Diff line
@@ -448,14 +448,6 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
	return nvme_transport_qpair_submit_request(qpair, req);
}

/* Invoke the request's completion callback, if any, with cpl. */
void
nvme_complete_request(struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	if (req->cb_fn) {
		req->cb_fn(req->cb_arg, cpl);
	}
}

static void
_nvme_io_qpair_enable(struct spdk_nvme_qpair *qpair)
{
+17 −55
Original line number Diff line number Diff line
@@ -47,6 +47,8 @@ struct spdk_trace_flag SPDK_LOG_NVME = {
#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};
@@ -231,10 +233,8 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * Free the request here so it does not leak.
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */
	free(req);

	return 0;
}
@@ -412,57 +412,19 @@ nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
	return 0;
}

/*
 * Unit test stub: heap-allocate a request instead of drawing from a
 * qpair free list.  calloc() already zero-initializes the entire
 * structure, so the partial memset the original stub performed was
 * dead code and has been removed.  Returns NULL on allocation failure.
 */
struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn,
		      void *cb_arg)
{
	struct nvme_request *req = calloc(1, sizeof(*req));

	if (req != NULL) {
		req->payload = *payload;
		req->payload_size = payload_size;

		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->qpair = qpair;
		req->pid = getpid();
	}

	return req;
}

/* Test stub: wrap buffer in a contiguous payload and allocate a request. */
struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload contig = NVME_PAYLOAD_CONTIG(buffer, NULL);

	return nvme_allocate_request(qpair, &contig, payload_size, cb_fn, cb_arg);
}

/* Test stub: allocate a request with no data payload. */
struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

/* Test stub: requests are heap-allocated in this test, so just free(). */
void
nvme_free_request(struct nvme_request *req)
{
	free(req);
}
/*
 * Declare a controller backed by a minimal admin qpair whose free list
 * is seeded with a single request, so the inlined
 * nvme_allocate_request() can succeed inside these tests.
 */
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -516,7 +478,7 @@ test_nvme_ctrlr_init_en_1_rdy_0(void)
static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -563,7 +525,7 @@ test_nvme_ctrlr_init_en_1_rdy_1(void)
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -731,7 +693,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -900,7 +862,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -1070,7 +1032,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

@@ -1109,7 +1071,7 @@ test_nvme_ctrlr_init_en_0_rdy_0(void)
static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

+32 −73
Original line number Diff line number Diff line
@@ -38,6 +38,8 @@

#define CTRLR_CDATA_ELPE   5

pid_t g_spdk_nvme_pid;

struct nvme_request g_req;

uint32_t error_num_entries;
@@ -245,44 +247,6 @@ static void verify_fw_image_download(struct nvme_request *req)
	CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
}

/*
 * Unit test stub: hand back the single static g_req rather than
 * drawing from a qpair free list.
 * NOTE(review): not reentrant — every allocation reuses the same
 * global request, which these single-threaded tests rely on.
 */
struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn,
		      void *cb_arg)
{
	struct nvme_request *req = &g_req;

	memset(req, 0, sizeof(*req));

	req->payload = *payload;
	req->payload_size = payload_size;

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->qpair = qpair;
	req->pid = getpid();

	return req;
}

/* Test stub: build a contiguous payload and delegate to nvme_allocate_request(). */
struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload payload;

	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
}

/* Test stub: allocate a request with no data payload. */
struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
@@ -291,20 +255,6 @@ nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uin
	return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
}

/* Test stub: invoke the callback, if any, with the completion entry. */
void
nvme_complete_request(struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	if (req->cb_fn) {
		req->cb_fn(req->cb_arg, cpl);
	}
}

/* Test stub: requests come from the static g_req, so freeing is a no-op. */
void
nvme_free_request(struct nvme_request *req)
{
	return;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
@@ -325,10 +275,19 @@ nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_reque
	return 0;
}

/*
 * Declare a controller backed by a minimal admin qpair whose free list
 * is seeded with a single request, so the inlined
 * nvme_allocate_request() can succeed inside these tests.
 */
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

static void
test_firmware_get_log_page(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_firmware_page		payload = {};

	verify_fn = verify_firmware_log_page;
@@ -341,7 +300,7 @@ test_firmware_get_log_page(void)
static void
test_health_get_log_page(void)
{
	struct spdk_nvme_ctrlr				ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_health_information_page	payload = {};

	verify_fn = verify_health_log_page;
@@ -354,7 +313,7 @@ test_health_get_log_page(void)
static void
test_error_get_log_page(void)
{
	struct spdk_nvme_ctrlr				ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_error_information_entry	payload = {};

	ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
@@ -369,7 +328,7 @@ test_error_get_log_page(void)

static void test_intel_smart_get_log_page(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_smart_information_page	payload = {};

	verify_fn = verify_intel_smart_log_page;
@@ -380,7 +339,7 @@ static void test_intel_smart_get_log_page(void)

static void test_intel_temperature_get_log_page(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_temperature_page	payload = {};

	verify_fn = verify_intel_temperature_log_page;
@@ -391,7 +350,7 @@ static void test_intel_temperature_get_log_page(void)

static void test_intel_read_latency_get_log_page(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_rw_latency_page	payload = {};

	verify_fn = verify_intel_read_latency_log_page;
@@ -403,7 +362,7 @@ static void test_intel_read_latency_get_log_page(void)

static void test_intel_write_latency_get_log_page(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_rw_latency_page	payload = {};

	verify_fn = verify_intel_write_latency_log_page;
@@ -415,7 +374,7 @@ static void test_intel_write_latency_get_log_page(void)

static void test_intel_get_log_page_directory(void)
{
	struct spdk_nvme_ctrlr				ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_log_page_directory	payload = {};

	verify_fn = verify_intel_get_log_page_directory;
@@ -427,7 +386,7 @@ static void test_intel_get_log_page_directory(void)

static void test_intel_marketing_description_get_log_page(void)
{
	struct spdk_nvme_ctrlr					ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_intel_marketing_description_page	payload = {};

	verify_fn = verify_intel_marketing_description_log_page;
@@ -457,7 +416,7 @@ static void test_intel_get_log_pages(void)
static void
test_set_feature_cmd(void)
{
	struct spdk_nvme_ctrlr  ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	verify_fn = verify_set_feature_cmd;

@@ -468,7 +427,7 @@ test_set_feature_cmd(void)
static void
test_get_feature_cmd(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	verify_fn = verify_get_feature_cmd;

@@ -478,7 +437,7 @@ test_get_feature_cmd(void)
static void
test_abort_cmd(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_qpair	qpair = {};

	STAILQ_INIT(&ctrlr.queued_aborts);
@@ -492,7 +451,7 @@ test_abort_cmd(void)
static void
test_io_raw_cmd(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_qpair	qpair = {};
	struct spdk_nvme_cmd	cmd = {};

@@ -504,7 +463,7 @@ test_io_raw_cmd(void)
static void
test_io_raw_cmd_with_md(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_qpair	qpair = {};
	struct spdk_nvme_cmd	cmd = {};

@@ -523,7 +482,7 @@ test_get_log_pages(void)
static void
test_namespace_attach(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_ctrlr_list		payload = {};

	verify_fn = verify_namespace_attach;
@@ -534,7 +493,7 @@ test_namespace_attach(void)
static void
test_namespace_detach(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_ctrlr_list		payload = {};

	verify_fn = verify_namespace_detach;
@@ -545,7 +504,7 @@ test_namespace_detach(void)
static void
test_namespace_create(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_ns_data		payload = {};

	verify_fn = verify_namespace_create;
@@ -555,7 +514,7 @@ test_namespace_create(void)
static void
test_namespace_delete(void)
{
	struct spdk_nvme_ctrlr			ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	verify_fn = verify_namespace_delete;
	nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
@@ -564,7 +523,7 @@ test_namespace_delete(void)
static void
test_format_nvme(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_format format = {};

	verify_fn = verify_format_nvme;
@@ -575,7 +534,7 @@ test_format_nvme(void)
static void
test_fw_commit(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();
	struct spdk_nvme_fw_commit fw_commit = {};

	fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
@@ -589,7 +548,7 @@ test_fw_commit(void)
static void
test_fw_image_download(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	DECLARE_AND_CONSTRUCT_CTRLR();

	verify_fn = verify_fw_image_download;

Loading