Commit 6bdcf5ab authored by GangCao, committed by Daniel Verkamp
Browse files

nvme: use nvme_robust_mutex related operations



Change-Id: I35416506dbafe5e9d21861e207e295e114bdb3db
Signed-off-by: GangCao <gang.cao@intel.com>
parent 47341b89
Loading
Loading
Loading
Loading
+17 −17
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ nvme_attach(enum spdk_nvme_transport_type trtype,
int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	nvme_ctrlr_proc_put_ref(ctrlr);

@@ -68,7 +68,7 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
		nvme_ctrlr_destruct(ctrlr);
	}

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
	return 0;
}

@@ -289,7 +289,7 @@ nvme_driver_init(void)
		return ret;
	}

	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	g_spdk_nvme_driver->initialized = false;

@@ -301,7 +301,7 @@ nvme_driver_init(void)
	if (g_spdk_nvme_driver->request_mempool == NULL) {
		SPDK_ERRLOG("unable to allocate pool of requests\n");

		pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		pthread_mutex_destroy(&g_spdk_nvme_driver->lock);

		spdk_memzone_free(SPDK_NVME_DRIVER_NAME);
@@ -309,7 +309,7 @@ nvme_driver_init(void)
		return -1;
	}

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return ret;
}
@@ -343,7 +343,7 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
	int start_rc;
	struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;

	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	/* Initialize all new controllers in the init_ctrlrs list in parallel. */
	while (!TAILQ_EMPTY(&g_spdk_nvme_driver->init_ctrlrs)) {
@@ -356,9 +356,9 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
			 *  the functions it calls (in particular nvme_ctrlr_set_num_qpairs())
			 *  can assume it is held.
			 */
			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
			start_rc = nvme_ctrlr_process_init(ctrlr);
			pthread_mutex_lock(&g_spdk_nvme_driver->lock);
			nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

			if (start_rc) {
				/* Controller failed to initialize. */
@@ -386,9 +386,9 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
				 * Unlock while calling attach_cb() so the user can call other functions
				 *  that may take the driver lock, like nvme_detach().
				 */
				pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
				nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
				attach_cb(cb_ctx, &ctrlr->probe_info, ctrlr, &ctrlr->opts);
				pthread_mutex_lock(&g_spdk_nvme_driver->lock);
				nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

				break;
			}
@@ -397,7 +397,7 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)

	g_spdk_nvme_driver->initialized = true;

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
	return rc;
}

@@ -423,7 +423,7 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
		return rc;
	}

	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

	if (hotplug_fd < 0) {
		hotplug_fd = spdk_uevent_connect();
@@ -437,7 +437,7 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
	} else {
		if (!spdk_nvme_transport_available(info->trtype)) {
			SPDK_ERRLOG("NVMe over Fabrics trtype %u not available\n", info->trtype);
			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
			return -1;
		}

@@ -454,16 +454,16 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
			 * Unlock while calling attach_cb() so the user can call other functions
			 *  that may take the driver lock, like nvme_detach().
			 */
			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
			attach_cb(cb_ctx, &ctrlr->probe_info, ctrlr, &ctrlr->opts);
			pthread_mutex_lock(&g_spdk_nvme_driver->lock);
			nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		}

		pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		return 0;
	}

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
	/*
	 * Keep going even if one or more nvme_attach() calls failed,
	 *  but maintain the value of rc to signal errors when we return.
+32 −32
Original line number Diff line number Diff line
@@ -151,7 +151,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
		return NULL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	/*
	 * Get the first available I/O queue ID.
@@ -159,14 +159,14 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, qprio);
	if (qpair == NULL) {
		SPDK_ERRLOG("transport->ctrlr_create_io_qpair() failed\n");
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}
	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
@@ -174,7 +174,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,

	nvme_ctrlr_proc_add_io_qpair(qpair);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return qpair;
}
@@ -190,7 +190,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)

	ctrlr = qpair->ctrlr;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_proc_remove_io_qpair(qpair);

@@ -198,11 +198,11 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);

	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -1;
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return 0;
}

@@ -495,7 +495,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
	int rc = 0;
	struct spdk_nvme_qpair *qpair;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_resetting || ctrlr->is_failed) {
		/*
@@ -503,7 +503,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return 0;
	}

@@ -541,7 +541,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)

	ctrlr->is_resetting = false;

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
@@ -990,7 +990,7 @@ nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
	struct spdk_nvme_ctrlr_process	*active_proc;
	pid_t				pid = getpid();

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -1001,7 +1001,7 @@ nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
		}
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

void
@@ -1011,7 +1011,7 @@ nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
	pid_t				pid = getpid();
	int				proc_count;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -1032,7 +1032,7 @@ nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
		}
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

int
@@ -1041,7 +1041,7 @@ nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
	struct spdk_nvme_ctrlr_process	*active_proc;
	int				ref = 0;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	nvme_ctrlr_remove_inactive_proc(ctrlr);

@@ -1049,7 +1049,7 @@ nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
		ref += active_proc->ref;
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return ref;
}
@@ -1362,12 +1362,12 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	int32_t num_completions;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (ctrlr->keep_alive_interval_ticks) {
		nvme_ctrlr_keep_alive(ctrlr);
	}
	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return num_completions;
}
@@ -1447,9 +1447,9 @@ spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	if (res)
		return res;
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
@@ -1472,9 +1472,9 @@ spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	if (res)
		return res;
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
@@ -1495,9 +1495,9 @@ spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_dat
	if (res)
		return 0;
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
@@ -1524,9 +1524,9 @@ spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
	if (res)
		return res;
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
@@ -1549,9 +1549,9 @@ spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	if (res)
		return res;
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
@@ -1594,9 +1594,9 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
			return res;

		while (status.done == false) {
			pthread_mutex_lock(&ctrlr->ctrlr_lock);
			nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
			pthread_mutex_unlock(&ctrlr->ctrlr_lock);
			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		}
		if (spdk_nvme_cpl_is_error(&status.cpl)) {
			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
@@ -1620,9 +1620,9 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
		return res;

	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
+33 −33
Original line number Diff line number Diff line
@@ -62,10 +62,10 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
	struct nvme_request	*req;
	int			rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -73,7 +73,7 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

@@ -135,11 +135,11 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -150,7 +150,7 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

@@ -162,11 +162,11 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -177,7 +177,7 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

@@ -189,11 +189,11 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -203,7 +203,7 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

@@ -215,10 +215,10 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -229,7 +229,7 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

@@ -241,10 +241,10 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
	struct spdk_nvme_cmd *cmd;
	int rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -254,7 +254,7 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
@@ -268,10 +268,10 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	struct spdk_nvme_cmd *cmd;
	int rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -282,7 +282,7 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
@@ -296,10 +296,10 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	struct spdk_nvme_cmd *cmd;
	int rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -309,7 +309,7 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
@@ -356,10 +356,10 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -371,7 +371,7 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
	cmd->cdw11 = numdu;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
@@ -404,10 +404,10 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
	struct spdk_nvme_cmd *cmd;
	int rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -416,7 +416,7 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;

@@ -431,10 +431,10 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
	struct spdk_nvme_cmd *cmd;
	int rc;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

@@ -444,7 +444,7 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
+2 −2
Original line number Diff line number Diff line
@@ -55,9 +55,9 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
	}

	while (status.done == false) {
		pthread_mutex_lock(&ns->ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ns->ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ns->ctrlr->adminq, 0);
		pthread_mutex_unlock(&ns->ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ns->ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		/* This can occur if the namespace is not active. Simply zero the
+4 −4
Original line number Diff line number Diff line
@@ -1657,7 +1657,7 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques
	nvme_pcie_qpair_check_enabled(qpair);

	if (nvme_qpair_is_admin_queue(qpair)) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	}

	tr = LIST_FIRST(&pqpair->free_tr);
@@ -1706,7 +1706,7 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques

exit:
	if (nvme_qpair_is_admin_queue(qpair)) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}

	return rc;
@@ -1733,7 +1733,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
	}

	if (nvme_qpair_is_admin_queue(qpair)) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	}

	if (max_completions == 0 || (max_completions > (qpair->num_entries - 1U))) {
@@ -1782,7 +1782,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
	if (nvme_qpair_is_admin_queue(qpair)) {
		nvme_pcie_qpair_complete_pending_admin_request(qpair);

		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}

	return num_completions;