Commit ba5ae93d authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Tomasz Zawadzki
Browse files

bdev/nvme: nvme_ctrlr_op_rpc() always call callback for simplification



For nvme_ctrlr_op_rpc(), change its return type to void and make it
always call the completion callback. This simplifies the code and
makes it easier for the next patch to reset each ctrlr sequentially when
multipath is configured.

Signed-off-by: default avatarShuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I744313b9f8ac650fe7c468d657a2ce899629479c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16743


Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 35774c71
Loading
Loading
Loading
Loading
+65 −10
Original line number Diff line number Diff line
@@ -2284,8 +2284,8 @@ bdev_nvme_reset(struct nvme_ctrlr *nvme_ctrlr)
	return 0;
}

int
nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
static int
nvme_ctrlr_op(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
	      bdev_nvme_ctrlr_op_cb cb_fn, void *cb_arg)
{
	int rc;
@@ -2300,12 +2300,72 @@ nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
	}

	if (rc == 0) {
		assert(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
		assert(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
		nvme_ctrlr->ctrlr_op_cb_fn = cb_fn;
		nvme_ctrlr->ctrlr_op_cb_arg = cb_arg;
	}
	return rc;
}

/* Context carried through an RPC-initiated controller operation so the
 * final user callback can be delivered on the thread that started it.
 */
struct nvme_ctrlr_op_rpc_ctx {
	struct spdk_thread *orig_thread;	/* thread that called nvme_ctrlr_op_rpc() */
	int rc;					/* operation result, recorded at completion */
	bdev_nvme_ctrlr_op_cb cb_fn;		/* user callback, invoked on orig_thread */
	void *cb_arg;				/* argument passed through to cb_fn */
};

/* Runs on the originating thread: hand the saved result to the user
 * callback, then release the context allocated by nvme_ctrlr_op_rpc().
 */
static void
_nvme_ctrlr_op_rpc_complete(void *_ctx)
{
	struct nvme_ctrlr_op_rpc_ctx *rpc_ctx = _ctx;

	assert(rpc_ctx != NULL);
	assert(rpc_ctx->cb_fn != NULL);

	rpc_ctx->cb_fn(rpc_ctx->cb_arg, rpc_ctx->rc);

	free(rpc_ctx);
}

/* Completion callback for nvme_ctrlr_op(); may fire on any thread.
 * Record the result and bounce the final delivery back to the thread
 * that issued the RPC.
 */
static void
nvme_ctrlr_op_rpc_complete(void *cb_arg, int rc)
{
	struct nvme_ctrlr_op_rpc_ctx *rpc_ctx = cb_arg;

	rpc_ctx->rc = rc;

	spdk_thread_send_msg(rpc_ctx->orig_thread, _nvme_ctrlr_op_rpc_complete, rpc_ctx);
}

/* Start the requested controller operation on behalf of an RPC.
 * cb_fn is always invoked exactly once with the operation's result:
 * synchronously on allocation failure, otherwise asynchronously on the
 * calling thread once the operation completes (or fails to start).
 */
void
nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
		  bdev_nvme_ctrlr_op_cb cb_fn, void *cb_arg)
{
	struct nvme_ctrlr_op_rpc_ctx *rpc_ctx;
	int rc;

	assert(cb_fn != NULL);

	rpc_ctx = calloc(1, sizeof(*rpc_ctx));
	if (rpc_ctx == NULL) {
		SPDK_ERRLOG("Failed to allocate nvme_ctrlr_op_rpc_ctx.\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	rpc_ctx->orig_thread = spdk_get_thread();
	rpc_ctx->cb_fn = cb_fn;
	rpc_ctx->cb_arg = cb_arg;

	rc = nvme_ctrlr_op(nvme_ctrlr, op, nvme_ctrlr_op_rpc_complete, rpc_ctx);
	if (rc != 0) {
		/* The operation did not start; complete immediately with the
		 * error, going through the same message-based completion path
		 * so the callback semantics stay uniform.
		 */
		nvme_ctrlr_op_rpc_complete(rpc_ctx, rc);
	}
}

static int _bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio);

static void
@@ -2391,20 +2451,15 @@ bdev_nvme_reset_io_continue(void *cb_arg, int rc)
static int
_bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)
{
	struct nvme_ctrlr *nvme_ctrlr = io_path->qpair->ctrlr;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_bdev_io *bdev_io;
	int rc;

	rc = bdev_nvme_reset(nvme_ctrlr);
	rc = nvme_ctrlr_op(io_path->qpair->ctrlr, NVME_CTRLR_OP_RESET,
			   bdev_nvme_reset_io_continue, bio);
	if (rc == 0) {
		assert(bio->io_path == NULL);
		bio->io_path = io_path;

		assert(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
		assert(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
		nvme_ctrlr->ctrlr_op_cb_fn = bdev_nvme_reset_io_continue;
		nvme_ctrlr->ctrlr_op_cb_arg = bio;
	} else if (rc == -EBUSY) {
		ctrlr_ch = io_path->qpair->ctrlr_ch;
		assert(ctrlr_ch != NULL);
+5 −5
Original line number Diff line number Diff line
@@ -343,15 +343,15 @@ enum nvme_ctrlr_op {
/**
 * Operate NVMe controller for the specified code.
 *
 * NOTE: The callback function is always called after this function returns except for
 * out of memory cases.
 *
 * \param nvme_ctrlr The specified NVMe controller to operate
 * \param op Operation code
 * \param cb_fn Function to be called back after operation completes
 * \param cb_arg Argument for callback function
 * \return zero on success. Negated errno on the following error conditions:
 * -ENXIO: controller is being destroyed.
 * -EBUSY: controller is already being operated.
 */
int nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
void nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,
		       bdev_nvme_ctrlr_op_cb cb_fn, void *cb_arg);

typedef void (*bdev_nvme_set_preferred_path_cb)(void *cb_arg, int rc);
+9 −49
Original line number Diff line number Diff line
@@ -1278,34 +1278,16 @@ static const struct spdk_json_object_decoder rpc_bdev_nvme_reset_controller_req_
	{"name", offsetof(struct rpc_bdev_nvme_reset_controller_req, name), spdk_json_decode_string},
};

struct rpc_bdev_nvme_reset_controller_ctx {
	struct spdk_jsonrpc_request *request;
	int rc;
	struct spdk_thread *orig_thread;
};

static void
_rpc_bdev_nvme_reset_controller_cb(void *_ctx)
rpc_bdev_nvme_reset_controller_cb(void *cb_arg, int rc)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = _ctx;
	struct spdk_jsonrpc_request *request = cb_arg;

	if (ctx->rc == 0) {
		spdk_jsonrpc_send_bool_response(ctx->request, true);
	if (rc == 0) {
		spdk_jsonrpc_send_bool_response(request, true);
	} else {
		spdk_jsonrpc_send_error_response(ctx->request, ctx->rc, spdk_strerror(-ctx->rc));
	}

	free(ctx);
		spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
	}

static void
rpc_bdev_nvme_reset_controller_cb(void *cb_arg, int rc)
{
	struct rpc_bdev_nvme_reset_controller_ctx *ctx = cb_arg;

	ctx->rc = rc;

	spdk_thread_send_msg(ctx->orig_thread, _rpc_bdev_nvme_reset_controller_cb, ctx);
}

static void
@@ -1313,49 +1295,27 @@ rpc_bdev_nvme_reset_controller(struct spdk_jsonrpc_request *request,
			       const struct spdk_json_val *params)
{
	struct rpc_bdev_nvme_reset_controller_req req = {NULL};
	struct rpc_bdev_nvme_reset_controller_ctx *ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Memory allocation failed");
		return;
	}

	if (spdk_json_decode_object(params, rpc_bdev_nvme_reset_controller_req_decoders,
				    SPDK_COUNTOF(rpc_bdev_nvme_reset_controller_req_decoders),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, spdk_strerror(EINVAL));
		goto err;
		goto exit;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(req.name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed at device lookup\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto err;
		goto exit;
	}

	ctx->request = request;
	ctx->orig_thread = spdk_get_thread();

	rc = nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, rpc_bdev_nvme_reset_controller_cb, ctx);
	if (rc != 0) {
		SPDK_NOTICELOG("Failed at bdev_nvme_reset_rpc\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, spdk_strerror(-rc));
		goto err;
	}
	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, rpc_bdev_nvme_reset_controller_cb, request);

exit:
	free_rpc_bdev_nvme_reset_controller_req(&req);
	return;

err:
	free_rpc_bdev_nvme_reset_controller_req(&req);
	free(ctx);
}
SPDK_RPC_REGISTER("bdev_nvme_reset_controller", rpc_bdev_nvme_reset_controller, SPDK_RPC_RUNTIME)

+121 −0
Original line number Diff line number Diff line
@@ -6690,6 +6690,126 @@ test_race_between_reset_and_disconnected(void)
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
/* Unit-test completion callback: store the operation result into the int
 * pointed to by cb_arg so the test can inspect it after polling.
 */
static void
ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
{
	int *rc_out = cb_arg;

	SPDK_CU_ASSERT_FATAL(rc_out != NULL);
	*rc_out = rc;
}

static void
test_ctrlr_op_rpc(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int ctrlr_op_rc;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;
	ctrlr_op_rc = -1;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_op_rc == -1);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_op_rc == 0);

	/* Case 4: invalid operation. */
	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EINVAL);

	spdk_put_io_channel(ch2);

@@ -6763,6 +6883,7 @@ main(int argc, const char **argv)
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);

	CU_basic_set_mode(CU_BRM_VERBOSE);