Commit cc6920a4 authored by Josh Soref, committed by Tomasz Zawadzki
Browse files

spelling: lib



Part of #2256

* accessible
* activation
* additional
* allocate
* association
* attempt
* barrier
* broadcast
* buffer
* calculate
* cases
* channel
* children
* command
* completion
* connect
* copied
* currently
* descriptor
* destroy
* detachment
* doesn't
* enqueueing
* exceeds
* execution
* extended
* fallback
* finalize
* first
* handling
* hugepages
* ignored
* implementation
* in_capsule
* initialization
* initialized
* initializing
* initiator
* negotiated
* notification
* occurred
* original
* outstanding
* partially
* partition
* processing
* receive
* received
* receiving
* redirected
* regions
* request
* requested
* response
* retrieved
* running
* satisfied
* should
* snapshot
* status
* succeeds
* successfully
* supplied
* those
* transferred
* translate
* triggering
* unregister
* unsupported
* urlsafe
* virtqueue
* volumes
* workaround
* zeroed

Change-Id: I569218754bd9d332ba517d4a61ad23d29eedfd0c
Signed-off-by: Josh Soref <jsoref@gmail.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10405


Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 56f8181a
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -45,8 +45,8 @@

/* Accelerator Engine Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
- * such as the one in /module/accel/ioat, supply the implemention
- * with the exception of the pure software implemention contained
+ * such as the one in /module/accel/ioat, supply the implementation
+ * with the exception of the pure software implementation contained
 * later in this file.
 */

@@ -895,7 +895,7 @@ spdk_accel_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *bat
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

-	/* Cancel anything currently oustanding for this batch. */
+	/* Cancel anything currently outstanding for this batch. */
	while ((batch = TAILQ_FIRST(&accel_ch->batches))) {
		TAILQ_REMOVE(&accel_ch->batches, batch, link);
		while ((accel_task = TAILQ_FIRST(&batch->hw_tasks))) {
+6 −6
Original line number Diff line number Diff line
@@ -2208,7 +2208,7 @@ blob_persist_start(struct spdk_blob_persist_ctx *ctx)
		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
	} else {
-		/* No change in size occured */
+		/* No change in size occurred */
		blob_persist_generate_new_md(ctx);
		return;
	}
@@ -3591,7 +3591,7 @@ bs_delete_corrupted_blob(void *cb_arg, int bserrno)
	}

	/* Snapshot and clone have the same copy of cluster map and extent pages
-	 * at this point. Let's clear both for snpashot now,
+	 * at this point. Let's clear both for snapshot now,
	 * so that it won't be cleared for clone later when we remove snapshot.
	 * Also set thin provision to pass data corruption check */
	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
@@ -3646,11 +3646,11 @@ bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
	}

	if (blob->parent_id == ctx->blob->id) {
-		/* Power failure occured before updating clone (snapshot delete case)
+		/* Power failure occurred before updating clone (snapshot delete case)
		 * or after updating clone (creating snapshot case) - keep snapshot */
		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
	} else {
-		/* Power failure occured after updating clone (snapshot delete case)
+		/* Power failure occurred after updating clone (snapshot delete case)
		 * or before updating clone (creating snapshot case) - remove snapshot */
		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
	}
@@ -5727,7 +5727,7 @@ bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
-		 * Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occured.
+		 * Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occurred.
		 * Newblob needs to be reverted to thin_provisioned state at creation to properly close. */
		blob_set_thin_provision(newblob);
		assert(spdk_mem_all_zero(newblob->active.clusters,
@@ -6628,7 +6628,7 @@ delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
			continue;
		}

-		/* Clone and snapshot both contain partialy filled matching extent pages.
+		/* Clone and snapshot both contain partially filled matching extent pages.
		 * Update the clone extent page in place with cluster map containing the mix of both. */
		ctx->next_extent_page = i + 1;

+1 −1
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ struct spdk_blob {
	TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
	TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;

-	/* Number of data clusters retrived from extent table,
+	/* Number of data clusters retrieved from extent table,
	 * that many have to be read from extent pages. */
	uint64_t	remaining_clusters_in_et;
};
+1 −1
Original line number Diff line number Diff line
@@ -203,7 +203,7 @@ parse_subsystem_event(const char *buf, struct spdk_pci_event *event)
			 * VFIO hotplug interface is "pci.c:pci_device_rte_dev_event".
			 * VFIO informs the userspace hotplug through vfio req notifier interrupt.
			 * The app needs to free the device userspace driver resource first then
-			 * the OS remove the device VFIO driver and boardcast the VFIO uevent.
+			 * the OS remove the device VFIO driver and broadcast the VFIO uevent.
			 */
			return 0;
		}
+2 −2
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@
 * It depends on memory usage of OCF which
 * in itself depends on the workload
 * It is a big number because OCF uses allocators
- * for every request it sends and recieves
+ * for every request it sends and receives
 */
#define ENV_ALLOCATOR_NBUFS 32767
#define GET_ELEMENTS_COUNT(_limit) (_limit < 0 ? ENV_ALLOCATOR_NBUFS : _limit)
@@ -160,7 +160,7 @@ static void __attribute__((destructor)) deinit_execution_context(void)
	free(exec_context_mutex);
}

-/* get_execuction_context must assure that after the call finishes, the caller
+/* get_execution_context must assure that after the call finishes, the caller
 * will not get preempted from current execution context. For userspace env
 * we simulate this behavior by acquiring per execution context mutex. As a
 * result the caller might actually get preempted, but no other thread will
Loading