Fix spelling here and there as suggested by codespell.

Signed-off-by: Dmitry Antipov <dmantipov@xxxxxxxxx>
---
 fs/gfs2/bmap.c       | 2 +-
 fs/gfs2/dir.c        | 2 +-
 fs/gfs2/lock_dlm.c   | 4 ++--
 fs/gfs2/quota.c      | 2 +-
 fs/gfs2/recovery.c   | 2 +-
 fs/gfs2/rgrp.c       | 4 ++--
 fs/gfs2/trace_gfs2.h | 2 +-
 7 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 1795c4e8dbf6..100784403758 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1194,7 +1194,7 @@ const struct iomap_ops gfs2_iomap_ops = {
  * @inode: The inode
  * @lblock: The logical block number
  * @bh_map: The bh to be mapped
- * @create: True if its ok to alloc blocks to satify the request
+ * @create: True if its ok to alloc blocks to satisfy the request
  *
  * The size of the requested mapping is defined in bh_map->b_size.
  *
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 85736135bcf5..dc5879e055ab 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1259,7 +1259,7 @@ static int compare_dents(const void *a, const void *b)
  * @sort_start: index of the directory array to start our sort
  * @copied: pointer to int that's non-zero if a entry has been copied out
  *
- * Jump through some hoops to make sure that if there are hash collsions,
+ * Jump through some hoops to make sure that if there are hash collisions,
  * they are read out at the beginning of a buffer.  We want to minimize
  * the possibility that they will fall into different readdir buffers or
  * that someone will want to seek to that location.
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 58aeeae7ed8c..e8099e293212 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -425,7 +425,7 @@ static void gdlm_cancel(struct gfs2_glock *gl)
  * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
  * 12. gfs2_recover does journal recoveries for failed jids identified above
  * 14. gfs2_control clears control_lock lvb bits for recovered jids
- * 15. gfs2_control checks if recover_block == recover_start (step 3 occured
+ * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
  *     again) then do nothing, otherwise if recover_start > recover_block
  *     then clear BLOCK_LOCKS.
  *
@@ -823,7 +823,7 @@ static void gfs2_control_func(struct work_struct *work)
 
 	/*
 	 * No more jid bits set in lvb, all recovery is done, unblock locks
-	 * (unless a new recover_prep callback has occured blocking locks
+	 * (unless a new recover_prep callback has occurred blocking locks
 	 * again while working above)
 	 */
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index d919edfb8dda..2989f5f3295e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -11,7 +11,7 @@
  * avoids the bottleneck of constantly touching the quota file, but introduces
  * fuzziness in the current usage value of IDs that are being used on different
  * nodes in the cluster simultaneously. So, it is possible for a user on
- * multiple nodes to overrun their quota, but that overrun is controlable.
+ * multiple nodes to overrun their quota, but that overrun is controllable.
  * Since quota tags are part of transactions, there is no need for a quota check
  * program to be run on node crashes or anything like that.
  *
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 527353c36aa5..779db78f9c80 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -154,7 +154,7 @@ int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
  * @blk: the block to look at
  * @head: the log header to return
  *
- * Read the log header for a given segement in a given journal.  Do a few
+ * Read the log header for a given segment in a given journal.  Do a few
  * sanity checks on it.
  *
  * Returns: 0 on success,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index b14e54b38ee8..a0aec705c5ee 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1028,7 +1028,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
  * special file, which might have been updated if someone expanded the
  * filesystem (via gfs2_grow utility), which adds new resource groups.
  *
- * Returns: 0 on succeess, error code otherwise
+ * Returns: 0 on success, error code otherwise
  */
 
 int gfs2_rindex_update(struct gfs2_sbd *sdp)
@@ -1912,7 +1912,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
  * the lock for this rgrp's glock is significantly greater than the
  * time taken for resource groups on average. We introduce a margin in
  * the form of the variable @var which is computed as the sum of the two
- * respective variences, and multiplied by a factor depending on @loops
+ * respective variances, and multiplied by a factor depending on @loops
  * and whether we have a lot of data to base the decision on. This is
  * then tested against the square difference of the means in order to
  * decide whether the result is statistically significant or not.
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 8eae8d62a413..04535e07d090 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -429,7 +429,7 @@ TRACE_EVENT(gfs2_ail_flush,
  * Objectives:
  * Latency: Bmap request time
  * Performance: Block allocator tracing
- * Correctness: Test of disard generation vs. blocks allocated
+ * Correctness: Test of discard generation vs. blocks allocated
  */
 
 /* Map an extent of blocks, possibly a new allocation */
-- 
2.47.1