Commit 4880b074 authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Tomasz Zawadzki
Browse files

nvme: clear request in NVME_INIT_REQUEST()



The request was always being cleared just before calling this macro, so
it's now moved inside this macro.

That also required moving the call to NVME_INIT_REQUEST() earlier in
nvme_fabric_qpair_connect_async() to ensure the request is cleared before
it is being filled out.

Signed-off-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I513568b8f37d29af6c819a7ed4f13b518cac6aee
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/21983


Reviewed-by: default avatarBen Walker <ben@nvidia.com>
Community-CI: Mellanox Build Bot
Reviewed-by: default avatarJim Harris <jim.harris@samsung.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 73175c6d
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -544,6 +544,9 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr

	assert(qpair->reserved_req != NULL);
	req = qpair->reserved_req;
	NVME_INIT_REQUEST(req, nvme_completion_poll_cb, status, NVME_PAYLOAD_CONTIG(nvmf_data, NULL),
			  sizeof(*nvmf_data), 0);

	memcpy(&req->cmd, &cmd, sizeof(cmd));

	if (nvme_qpair_is_admin_queue(qpair)) {
@@ -558,9 +561,6 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
	snprintf(nvmf_data->hostnqn, sizeof(nvmf_data->hostnqn), "%s", ctrlr->opts.hostnqn);
	snprintf(nvmf_data->subnqn, sizeof(nvmf_data->subnqn), "%s", ctrlr->trid.subnqn);

	NVME_INIT_REQUEST(req, nvme_completion_poll_cb, status, NVME_PAYLOAD_CONTIG(nvmf_data, NULL),
			  sizeof(*nvmf_data), 0);

	rc = nvme_qpair_submit_request(qpair, req);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to allocate/submit FABRIC_CONNECT command, rc %d\n", rc);
+18 −13
Original line number Diff line number Diff line
@@ -1310,8 +1310,26 @@ typedef int (*spdk_nvme_parse_ana_log_page_cb)(
int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);

static inline void
nvme_request_clear(struct nvme_request *req)
{
	/*
	 * Zero only the leading fields of the request (everything up to,
	 *  but not including, payload_size).  All other fields will be
	 *  initialized appropriately either later in this function, or
	 *  before they are needed later in the submission path.  For
	 *  example, the children TAILQ_ENTRY and following members are
	 *  only used as part of I/O splitting, so we avoid memsetting
	 *  them until it is actually needed.  They will be initialized in
	 *  nvme_request_add_child() if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, payload_size));
}

#define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
	do {						\
		nvme_request_clear(req);		\
		req->cb_fn = _cb_fn;			\
		req->cb_arg = _cb_arg;			\
		req->payload = _payload;		\
@@ -1337,19 +1355,6 @@ nvme_allocate_request(struct spdk_nvme_qpair *qpair,
	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
	qpair->num_outstanding_reqs++;

	/*
	 * Only memset/zero fields that need it.  All other fields
	 *  will be initialized appropriately either later in this
	 *  function, or before they are needed later in the
	 *  submission patch.  For example, the children
	 *  TAILQ_ENTRY and following members are
	 *  only used as part of I/O splitting so we avoid
	 *  memsetting them until it is actually needed.
	 *  They will be initialized in nvme_request_add_child()
	 *  if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, payload_size));

	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);

	return req;