Commit e10b4806 authored by Jim Harris's avatar Jim Harris Committed by Tomasz Zawadzki
Browse files

nvme: add nvme_ctrlr_lock



Rather than doing nvme_robust_mutex_lock(&ctrlr->ctrlr_lock)
everywhere, we can just do nvme_ctrlr_lock(ctrlr) instead.

This will allow for adding some code to ensure controller lock isn't
held when the controller is destroyed, as part of debugging issue
#3401.

Signed-off-by: Jim Harris <jim.harris@samsung.com>
Change-Id: Id49a3e0a92f18c9f5998cfd546935dc718d153ba
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23729


Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
parent 78977629
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -706,9 +706,9 @@ nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr,
		/* Controller failed to initialize. */
		TAILQ_REMOVE(&probe_ctx->init_ctrlrs, ctrlr, tailq);
		SPDK_ERRLOG("Failed to initialize SSD: %s\n", ctrlr->trid.traddr);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_lock(ctrlr);
		nvme_ctrlr_fail(ctrlr, false);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		nvme_ctrlr_destruct(ctrlr);
		return;
	}
+5 −5
Original line number Diff line number Diff line
@@ -192,11 +192,11 @@ nvme_auth_get_seqnum(struct spdk_nvme_qpair *qpair)
	uint32_t seqnum;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	if (ctrlr->auth_seqnum == 0) {
		rc = RAND_bytes((void *)&ctrlr->auth_seqnum, sizeof(ctrlr->auth_seqnum));
		if (rc != 1) {
			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
			nvme_ctrlr_unlock(ctrlr);
			return 0;
		}
	}
@@ -204,7 +204,7 @@ nvme_auth_get_seqnum(struct spdk_nvme_qpair *qpair)
		ctrlr->auth_seqnum = 1;
	}
	seqnum = ctrlr->auth_seqnum;
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return seqnum;
}
@@ -1237,9 +1237,9 @@ nvme_fabric_qpair_authenticate_async(struct spdk_nvme_qpair *qpair)
	assert(qpair->poll_status == NULL);
	qpair->poll_status = status;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	auth->tid = ctrlr->auth_tid++;
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	nvme_auth_set_state(qpair, NVME_QPAIR_AUTH_STATE_NEGOTIATE);

+76 −76
Original line number Diff line number Diff line
@@ -383,11 +383,11 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
		return NULL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	cc.raw = ctrlr->process_init_cc.raw;

	if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return NULL;
	}

@@ -397,13 +397,13 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
		NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return NULL;
	}

	qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
	if (qid < 0) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return NULL;
	}

@@ -411,7 +411,7 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	if (qpair == NULL) {
		NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
		spdk_nvme_ctrlr_free_qid(ctrlr, qid);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return NULL;
	}

@@ -419,7 +419,7 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,

	nvme_ctrlr_proc_add_io_qpair(qpair);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return qpair;
}
@@ -433,9 +433,9 @@ spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
		return -EISCONN;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
		spdk_delay_us(100);
@@ -449,9 +449,9 @@ spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

struct spdk_nvme_qpair *
@@ -464,7 +464,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	struct spdk_nvme_io_qpair_opts	opts;
	int				rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
		/* When controller is resetting or initializing, free_io_qids is deleted or not created yet.
@@ -518,7 +518,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	}

unlock:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return qpair;
}
@@ -535,7 +535,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
	assert(qpair->ctrlr != NULL);

	ctrlr = qpair->ctrlr;
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	qpair_state = nvme_qpair_get_state(qpair);

	if (ctrlr->is_removed) {
@@ -565,7 +565,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
	}

out:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -587,9 +587,9 @@ nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	assert(ctrlr != NULL);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

int
@@ -634,7 +634,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
		nvme_qpair_abort_all_queued_reqs(qpair);
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	nvme_ctrlr_proc_remove_io_qpair(qpair);

@@ -642,7 +642,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
	spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);

	nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return 0;
}

@@ -1051,9 +1051,9 @@ nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	nvme_ctrlr_fail(ctrlr, false);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

static void
@@ -1689,9 +1689,9 @@ spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_ctrlr_disconnect(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -1699,7 +1699,7 @@ spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	ctrlr->prepare_for_reset = false;

@@ -1803,7 +1803,7 @@ spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
	}
	ctrlr->is_resetting = false;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	if (!ctrlr->cdata.oaes.ns_attribute_notices) {
		/*
@@ -1863,14 +1863,14 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	rc = nvme_ctrlr_disconnect(ctrlr);
	if (rc == 0) {
		nvme_ctrlr_fail_io_qpairs(ctrlr);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	if (rc != 0) {
		if (rc == -EBUSY) {
@@ -1911,12 +1911,12 @@ spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
	}

	NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	ctrlr->is_resetting = true;
	rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
	ctrlr->is_resetting = false;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	/*
	 * No more cleanup at this point like in the ctrlr reset. A subsystem reset will cause
	 * a hot remove for PCIe transport. The hot remove handling does all the necessary ctrlr cleanup.
@@ -1929,7 +1929,7 @@ spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transpo
{
	int rc = 0;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (ctrlr->is_failed == false) {
		rc = -EPERM;
@@ -1949,7 +1949,7 @@ spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transpo
	ctrlr->trid = *trid;

out:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -1961,10 +1961,10 @@ spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
		return;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	ctrlr->remove_cb = remove_cb;
	ctrlr->cb_ctx = remove_ctx;
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

static void
@@ -3501,7 +3501,7 @@ nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ctrlr_process	*active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -3510,7 +3510,7 @@ nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
		active_proc->ref++;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

void
@@ -3519,7 +3519,7 @@ nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
	struct spdk_nvme_ctrlr_process	*active_proc;
	int				proc_count;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -3537,7 +3537,7 @@ nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
		}
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

int
@@ -3546,7 +3546,7 @@ nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
	struct spdk_nvme_ctrlr_process	*active_proc;
	int				ref = 0;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -3554,7 +3554,7 @@ nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
		ref += active_proc->ref;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return ref;
}
@@ -3568,14 +3568,14 @@ nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
	struct spdk_nvme_ctrlr_process	*active_proc;
	struct spdk_pci_device		*devhandle = NULL;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	active_proc = nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
		devhandle = active_proc->devhandle;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return devhandle;
}
@@ -4354,19 +4354,19 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
	int32_t rc;
	struct spdk_nvme_ctrlr_process	*active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (ctrlr->keep_alive_interval_ticks) {
		rc = nvme_ctrlr_keep_alive(ctrlr);
		if (rc) {
			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
			nvme_ctrlr_unlock(ctrlr);
			return rc;
		}
	}

	rc = nvme_io_msg_process(ctrlr);
	if (rc < 0) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return rc;
	}
	num_completions = rc;
@@ -4383,7 +4383,7 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
		nvme_ctrlr_disconnect_done(ctrlr);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	if (rc < 0) {
		num_completions = rc;
@@ -4544,7 +4544,7 @@ spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
		return NULL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	tmp.id = nsid;
	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
@@ -4552,7 +4552,7 @@ spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
	if (ns == NULL) {
		ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
		if (ns == NULL) {
			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
			nvme_ctrlr_unlock(ctrlr);
			return NULL;
		}

@@ -4561,7 +4561,7 @@ spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
		RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return ns;
}
@@ -4603,7 +4603,7 @@ spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
{
	struct spdk_nvme_ctrlr_process *active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	active_proc = nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
@@ -4611,7 +4611,7 @@ spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
		active_proc->aer_cb_arg = aer_cb_arg;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

void
@@ -4627,7 +4627,7 @@ spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
{
	struct spdk_nvme_ctrlr_process	*active_proc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	active_proc = nvme_ctrlr_get_current_process(ctrlr);
	if (active_proc) {
@@ -4639,7 +4639,7 @@ spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,

	ctrlr->timeout_enabled = true;

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

bool
@@ -4956,9 +4956,9 @@ spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)

	size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	if (rc < 0) {
		return rc;
@@ -4972,9 +4972,9 @@ spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	void *buf;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return buf;
}
@@ -4982,9 +4982,9 @@ spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
void
spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	nvme_transport_ctrlr_unmap_cmb(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

int
@@ -4992,9 +4992,9 @@ spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -5004,9 +5004,9 @@ spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -5016,9 +5016,9 @@ spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	void *buf;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return buf;
}
@@ -5028,9 +5028,9 @@ spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -5057,25 +5057,25 @@ spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *p
		return -EALREADY;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	bpmb_size = bprsz * 4096;
	bpmbl = spdk_vtophys(payload, &bpmb_size);
	if (bpmbl == SPDK_VTOPHYS_ERROR) {
		NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of bpmbl failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -EFAULT;
	}

	if (bpmb_size != bprsz * 4096) {
		NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -EFAULT;
	}

	if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
		NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -EIO;
	}

@@ -5085,11 +5085,11 @@ spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *p

	if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
		NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -EIO;
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return 0;
}

@@ -5325,16 +5325,16 @@ spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
	uint32_t qid;

	assert(ctrlr->free_io_qids);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -1;
	}

	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return qid;
}

@@ -5343,13 +5343,13 @@ spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
{
	assert(qid <= ctrlr->opts.num_io_queues);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (spdk_likely(ctrlr->free_io_qids)) {
		spdk_bit_array_set(ctrlr->free_io_qids, qid);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
}

int
+64 −64
Original line number Diff line number Diff line
@@ -130,10 +130,10 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -141,7 +141,7 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -154,12 +154,12 @@ nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cnt
	struct spdk_nvme_cmd *cmd;
	int		     rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -172,7 +172,7 @@ nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cnt

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -184,12 +184,12 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -200,7 +200,7 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -212,12 +212,12 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -228,7 +228,7 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -240,12 +240,12 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -255,7 +255,7 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -267,10 +267,10 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -281,7 +281,7 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -293,10 +293,10 @@ nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t pr
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -307,7 +307,7 @@ nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t pr

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -319,10 +319,10 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -332,7 +332,7 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -346,11 +346,11 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -361,7 +361,7 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -375,11 +375,11 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -389,7 +389,7 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -404,11 +404,11 @@ spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t featur
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -419,7 +419,7 @@ spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t featur
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -434,11 +434,11 @@ spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t featur
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -450,7 +450,7 @@ spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t featur
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -542,17 +542,17 @@ spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_
	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -570,7 +570,7 @@ spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -659,10 +659,10 @@ spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}
	req->cb_arg = req;
@@ -676,7 +676,7 @@ spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -754,7 +754,7 @@ spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
		return -EINVAL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
@@ -762,7 +762,7 @@ spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);

		return -ENOMEM;
	}
@@ -842,7 +842,7 @@ spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
		nvme_free_request(parent);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

@@ -855,10 +855,10 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -867,7 +867,7 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;

@@ -882,10 +882,10 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -895,7 +895,7 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -909,11 +909,11 @@ spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -926,7 +926,7 @@ spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -940,11 +940,11 @@ spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -957,7 +957,7 @@ spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -971,10 +971,10 @@ nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -985,7 +985,7 @@ nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
@@ -1001,11 +1001,11 @@ nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}
	cmd = &req->cmd;
@@ -1021,7 +1021,7 @@ nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
+3 −3
Original line number Diff line number Diff line
@@ -50,11 +50,11 @@ spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		return -EINVAL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

@@ -64,6 +64,6 @@ spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_ctrlr_unlock(ctrlr);
	return rc;
}
Loading