Commit 7c90a750 authored by Ben Walker, committed by Changpeng Liu
Browse files

nvme: Repack qpair structures



Try to group data members that are used often into the
same cache lines. We still need to find more space in the second
cache line of spdk_nvme_pcie_qpair so that the important
parts of spdk_nvme_qpair fit.

Change-Id: Ib936cb2b1acc722de7ec313d6faa3812aacde394
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447968


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent 8e088c9b
Loading
Loading
Loading
Loading
+10 −9
Original line number Diff line number Diff line
@@ -324,14 +324,7 @@ struct nvme_async_event_request {
};

struct spdk_nvme_qpair {
	STAILQ_HEAD(, nvme_request)	free_req;
	STAILQ_HEAD(, nvme_request)	queued_req;
	/** Commands opcode in this list will return error */
	TAILQ_HEAD(, nvme_error_cmd)	err_cmd_head;
	/** Requests in this list will return error */
	STAILQ_HEAD(, nvme_request)	err_req_head;

	enum spdk_nvme_transport_type	trtype;
	struct spdk_nvme_ctrlr		*ctrlr;

	uint16_t			id;

@@ -351,7 +344,15 @@ struct spdk_nvme_qpair {
	 */
	uint8_t				no_deletion_notification_needed: 1;

	struct spdk_nvme_ctrlr		*ctrlr;
	enum spdk_nvme_transport_type	trtype;

	STAILQ_HEAD(, nvme_request)	free_req;
	STAILQ_HEAD(, nvme_request)	queued_req;

	/** Commands opcode in this list will return error */
	TAILQ_HEAD(, nvme_error_cmd)	err_cmd_head;
	/** Requests in this list will return error */
	STAILQ_HEAD(, nvme_request)	err_req_head;

	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
+22 −20
Original line number Diff line number Diff line
@@ -164,10 +164,12 @@ struct nvme_pcie_qpair {
	uint16_t cq_head;
	uint16_t sq_head;

	uint8_t phase;
	bool is_enabled;
	bool delay_pcie_doorbell;
	bool has_shadow_doorbell;
	struct {
		uint8_t phase			: 1;
		uint8_t is_enabled		: 1;
		uint8_t delay_pcie_doorbell	: 1;
		uint8_t has_shadow_doorbell	: 1;
	} flags;

	/*
	 * Base qpair structure.
@@ -675,7 +677,7 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
	}

	pqpair->num_entries = NVME_ADMIN_ENTRIES;
	pqpair->delay_pcie_doorbell = false;
	pqpair->flags.delay_pcie_doorbell = 0;

	ctrlr->adminq = &pqpair->qpair;

@@ -950,7 +952,7 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
	 *  we'll toggle the bit each time when the completion queue
	 *  rolls over.
	 */
	pqpair->phase = 1;
	pqpair->flags.phase = 1;

	memset(pqpair->cmd, 0,
	       pqpair->num_entries * sizeof(struct spdk_nvme_cmd));
@@ -1179,7 +1181,7 @@ nvme_pcie_qpair_ring_sq_doorbell(struct spdk_nvme_qpair *qpair)
	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
	bool need_mmio = true;

	if (spdk_unlikely(pqpair->has_shadow_doorbell)) {
	if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
		need_mmio = nvme_pcie_qpair_update_mmio_required(qpair,
				pqpair->sq_tail,
				pqpair->shadow_doorbell.sq_tdbl,
@@ -1216,7 +1218,7 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
		SPDK_ERRLOG("sq_tail is passing sq_head!\n");
	}

	if (!pqpair->delay_pcie_doorbell) {
	if (!pqpair->flags.delay_pcie_doorbell) {
		nvme_pcie_qpair_ring_sq_doorbell(qpair);
	}
}
@@ -1389,7 +1391,7 @@ nvme_pcie_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	pqpair->is_enabled = true;
	pqpair->flags.is_enabled = true;
	if (nvme_qpair_is_io_queue(qpair)) {
		nvme_pcie_io_qpair_enable(qpair);
	} else {
@@ -1415,7 +1417,7 @@ nvme_pcie_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	pqpair->is_enabled = false;
	pqpair->flags.is_enabled = false;
	if (nvme_qpair_is_io_queue(qpair)) {
		nvme_pcie_io_qpair_disable(qpair);
	} else {
@@ -1576,9 +1578,9 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
						      pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.cq_eventidx = ctrlr->eventidx + (2 * qpair->id + 1) *
						      pctrlr->doorbell_stride_u32;
		pqpair->has_shadow_doorbell = true;
		pqpair->flags.has_shadow_doorbell = 1;
	} else {
		pqpair->has_shadow_doorbell = false;
		pqpair->flags.has_shadow_doorbell = 0;
	}
	nvme_pcie_qpair_reset(qpair);

@@ -1602,7 +1604,7 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
	}

	pqpair->num_entries = opts->io_queue_size;
	pqpair->delay_pcie_doorbell = opts->delay_pcie_doorbell;
	pqpair->flags.delay_pcie_doorbell = opts->delay_pcie_doorbell;

	qpair = &pqpair->qpair;

@@ -1936,11 +1938,11 @@ nvme_pcie_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	if (!pqpair->is_enabled &&
	if (!pqpair->flags.is_enabled &&
	    !qpair->ctrlr->is_resetting) {
		nvme_qpair_enable(qpair);
	}
	return pqpair->is_enabled;
	return pqpair->flags.is_enabled;
}

int
@@ -1960,7 +1962,7 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques

	tr = TAILQ_FIRST(&pqpair->free_tr);

	if (tr == NULL || !pqpair->is_enabled) {
	if (tr == NULL || !pqpair->flags.is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset.
@@ -2095,7 +2097,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
	while (1) {
		cpl = &pqpair->cpl[pqpair->cq_head];

		if (cpl->status.p != pqpair->phase) {
		if (cpl->status.p != pqpair->flags.phase) {
			break;
		}
#if defined(__PPC64__) || defined(__aarch64__)
@@ -2109,7 +2111,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_

		if (spdk_unlikely(++pqpair->cq_head == pqpair->num_entries)) {
			pqpair->cq_head = 0;
			pqpair->phase = !pqpair->phase;
			pqpair->flags.phase = !pqpair->flags.phase;
		}

		tr = &pqpair->tr[cpl->cid];
@@ -2131,7 +2133,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
	if (num_completions > 0) {
		bool need_mmio = true;

		if (spdk_unlikely(pqpair->has_shadow_doorbell)) {
		if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
			need_mmio = nvme_pcie_qpair_update_mmio_required(qpair,
					pqpair->cq_head,
					pqpair->shadow_doorbell.cq_hdbl,
@@ -2145,7 +2147,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
		}
	}

	if (pqpair->delay_pcie_doorbell) {
	if (pqpair->flags.delay_pcie_doorbell) {
		if (pqpair->last_sq_tail != pqpair->sq_tail) {
			nvme_pcie_qpair_ring_sq_doorbell(qpair);
			pqpair->last_sq_tail = pqpair->sq_tail;