Formats of pr_* do not include a newline at the end, so add it.

Signed-off-by: Chengguang Xu <cgxu519@xxxxxxxxxx>
---
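Note for readers (not part of the patch): the pr_* helpers are thin
wrappers around printk() and do not append a newline themselves. A
message that does not end in '\n' leaves the log record open, so a
later pr_cont()/KERN_CONT message, possibly from an unrelated context,
may be appended to it before the record is flushed. A minimal sketch of
the before/after pattern this patch applies, taken from the bset.c call
site below:

	/* before: record left open, may merge with a later continuation */
	pr_debug("sorted %i keys", out->keys);

	/* after: the trailing newline explicitly terminates the record */
	pr_debug("sorted %i keys\n", out->keys);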
: "."); else diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 1b736b8..261cd0b 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -46,7 +46,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, closure_init_stack(&cl); - pr_debug("reading %u", bucket_index); + pr_debug("reading %u\n", bucket_index); while (offset < ca->sb.bucket_size) { reread: left = ca->sb.bucket_size - offset; @@ -77,13 +77,13 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, size_t blocks, bytes = set_bytes(j); if (j->magic != jset_magic(&ca->sb)) { - pr_debug("%u: bad magic", bucket_index); + pr_debug("%u: bad magic\n", bucket_index); return ret; } if (bytes > left << 9 || bytes > PAGE_SIZE << JSET_BITS) { - pr_info("%u: too big, %zu bytes, offset %u", + pr_info("%u: too big, %zu bytes, offset %u\n", bucket_index, bytes, offset); return ret; } @@ -92,7 +92,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, goto reread; if (j->csum != csum_set(j)) { - pr_info("%u: bad csum, %zu bytes, offset %u", + pr_info("%u: bad csum, %zu bytes, offset %u\n", bucket_index, bytes, offset); return ret; } @@ -163,7 +163,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) uint64_t seq; bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); - pr_debug("%u journal buckets", ca->sb.njournal_buckets); + pr_debug("%u journal buckets\n", ca->sb.njournal_buckets); /* * Read journal buckets ordered by golden ratio hash to quickly @@ -188,7 +188,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) * If that fails, check all the buckets we haven't checked * already */ - pr_debug("falling back to linear search"); + pr_debug("falling back to linear search\n"); for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); l < ca->sb.njournal_buckets; @@ -205,7 +205,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) /* Binary search */ m = l; r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); - pr_debug("starting binary search, l %u r %u", l, r); + pr_debug("starting binary search, l %u r %u\n", l, r); while (l + 1 < r) { seq = list_entry(list->prev, struct journal_replay, @@ -225,7 +225,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) * Read buckets in reverse order until we stop finding more * journal entries */ - pr_debug("finishing up: m %u njournal_buckets %u", + pr_debug("finishing up: m %u njournal_buckets %u\n", m, ca->sb.njournal_buckets); l = m; @@ -355,7 +355,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) entries++; } - pr_info("journal replay done, %i keys in %i entries, seq %llu", + pr_info("journal replay done, %i keys in %i entries, seq %llu\n", keys, entries, end); err: while (!list_empty(list)) { @@ -569,7 +569,7 @@ void bch_journal_next(struct journal *j) j->cur->data->keys = 0; if (fifo_full(&j->pin)) - pr_debug("journal_pin full (%zu)", fifo_used(&j->pin)); + pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin)); } static void journal_write_endio(struct bio *bio) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 1a46b41..515a2fb 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -121,7 +121,7 @@ static void bch_data_invalidate(struct closure *cl) struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct bio *bio = op->bio; - pr_debug("invalidating %i sectors from %llu", + pr_debug("invalidating %i sectors from %llu\n", 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 1a46b41..515a2fb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -121,7 +121,7 @@ static void bch_data_invalidate(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio;
 
-	pr_debug("invalidating %i sectors from %llu",
+	pr_debug("invalidating %i sectors from %llu\n",
 		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
@@ -395,7 +395,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
-		pr_debug("skipping unaligned io");
+		pr_debug("skipping unaligned io\n");
 		goto skip;
 	}
 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3128957..1a9fdab 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -95,7 +95,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
 		sb->d[i] = le64_to_cpu(s->d[i]);
 
-	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
+	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
 		 sb->version, sb->flags, sb->seq, sb->keys);
 
 	err = "Not a bcache superblock";
@@ -238,7 +238,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
 
 	out->csum = csum_set(out);
 
-	pr_debug("ver %llu, flags %llu, seq %llu",
+	pr_debug("ver %llu, flags %llu, seq %llu\n",
 		 sb->version, sb->flags, sb->seq);
 
 	submit_bio(bio);
@@ -368,11 +368,11 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
 	}
 
 	bch_extent_to_text(buf, sizeof(buf), k);
-	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+	pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
 		if (!bch_is_zero(u->uuid, 16))
-			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
+			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
 				 u - c->uuids, u->uuid, u->label,
 				 u->first_reg, u->last_reg, u->invalidated);
 
@@ -540,7 +540,7 @@ void bch_prio_write(struct cache *ca)
 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
 			&ca->meta_sectors_written);
 
-	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+	//pr_debug("free %zu, free_inc %zu, unused %zu\n", fifo_used(&ca->free),
 	//	  fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
@@ -609,10 +609,10 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 		prio_io(ca, bucket, REQ_OP_READ, 0);
 
 		if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
-			pr_warn("bad csum reading priorities");
+			pr_warn("bad csum reading priorities\n");
 
 		if (p->magic != pset_magic(&ca->sb))
-			pr_warn("bad magic reading priorities");
+			pr_warn("bad magic reading priorities\n");
 
 		bucket = p->next_bucket;
 		d = p->data;
@@ -743,7 +743,7 @@ static void bcache_device_free(struct bcache_device *d)
 {
 	lockdep_assert_held(&bch_register_lock);
 
-	pr_info("%s stopped", d->disk->disk_name);
+	pr_info("%s stopped\n", d->disk->disk_name);
 
 	if (d->c)
 		bcache_device_detach(d);
@@ -780,7 +780,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 
 	if (!d->nr_stripes || d->nr_stripes > INT_MAX ||
 	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
-		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)\n",
 			(unsigned)d->nr_stripes);
 		return -ENOMEM;
 	}
@@ -896,7 +896,7 @@ void bch_cached_dev_run(struct cached_dev *dc)
 	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
 	    sysfs_create_link(&disk_to_dev(d->disk)->kobj,
 			      &d->kobj, "bcache"))
-		pr_debug("error creating sysfs link");
+		pr_debug("error creating sysfs link\n");
 }
 
 static void cached_dev_detach_finish(struct work_struct *w)
@@ -931,7 +931,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
 	mutex_unlock(&bch_register_lock);
 
-	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+	pr_info("Caching disabled for %s\n", bdevname(dc->bdev, buf));
 
 	/* Drop ref we took in cached_dev_detach() */
 	closure_put(&dc->disk.cl);
@@ -971,18 +971,18 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 		return -ENOENT;
 
 	if (dc->disk.c) {
-		pr_err("Can't attach %s: already attached", buf);
+		pr_err("Can't attach %s: already attached\n", buf);
 		return -EINVAL;
 	}
 
 	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-		pr_err("Can't attach %s: shutting down", buf);
+		pr_err("Can't attach %s: shutting down\n", buf);
 		return -EINVAL;
 	}
 
 	if (dc->sb.block_size < c->sb.block_size) {
 		/* Will die */
-		pr_err("Couldn't attach %s: block size less than set's block size",
+		pr_err("Couldn't attach %s: block size less than set's block size\n",
 		       buf);
 		return -EINVAL;
 	}
@@ -999,13 +999,13 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	if (!u) {
 		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-			pr_err("Couldn't find uuid for %s in set", buf);
+			pr_err("Couldn't find uuid for %s in set\n", buf);
 			return -ENOENT;
 		}
 
 		u = uuid_find_empty(c);
 		if (!u) {
-			pr_err("Not caching %s, no room for UUID", buf);
+			pr_err("Not caching %s, no room for UUID\n", buf);
 			return -EINVAL;
 		}
 	}
@@ -1064,7 +1064,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	/* Allow the writeback thread to proceed */
 	up_write(&dc->writeback_lock);
 
-	pr_info("Caching %s as %s on set %pU",
+	pr_info("Caching %s as %s on set %pU\n",
 		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
 		dc->disk.c->sb.set_uuid);
 	return 0;
@@ -1192,7 +1192,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
 		goto err;
 
-	pr_info("registered backing device %s", bdevname(bdev, name));
+	pr_info("registered backing device %s\n", bdevname(bdev, name));
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
@@ -1204,7 +1204,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	pr_notice("error opening %s: %s\n", bdevname(bdev, name), err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1294,7 +1294,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 
 	u = uuid_find_empty(c);
 	if (!u) {
-		pr_err("Can't create volume, no room for UUID");
+		pr_err("Can't create volume, no room for UUID\n");
 		return -EINVAL;
 	}
 
@@ -1386,7 +1386,7 @@ static void cache_set_free(struct closure *cl)
 	list_del(&c->list);
 	mutex_unlock(&bch_register_lock);
 
-	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
+	pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
 	wake_up(&unregister_wait);
 
 	closure_debug_destroy(&c->cl);
@@ -1586,7 +1586,7 @@ static void run_cache_set(struct cache_set *c)
 		if (bch_journal_read(c, &journal))
 			goto err;
 
-		pr_debug("btree_journal_read() done");
+		pr_debug("btree_journal_read() done\n");
 
 		err = "no journal entries found";
 		if (list_empty(&journal))
@@ -1628,7 +1628,7 @@ static void run_cache_set(struct cache_set *c)
 
 		bch_journal_mark(c, &journal);
 		bch_initial_gc_finish(c);
-		pr_debug("btree_check() done");
+		pr_debug("btree_check() done\n");
 
 		/*
 		 * bcache_journal_next() can't happen sooner, or
@@ -1657,7 +1657,7 @@ static void run_cache_set(struct cache_set *c)
 
 		bch_journal_replay(c, &journal);
 	} else {
-		pr_notice("invalidating existing data");
+		pr_notice("invalidating existing data\n");
 
 		for_each_cache(ca, c, i) {
 			unsigned j;
existing data"); + pr_notice("invalidating existing data\n"); for_each_cache(ca, c, i) { unsigned j; @@ -1783,7 +1783,7 @@ static const char *register_cache_set(struct cache *ca) memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c->sb.flags = ca->sb.flags; c->sb.seq = ca->sb.seq; - pr_debug("set version = %llu", c->sb.version); + pr_debug("set version = %llu\n", c->sb.version); } kobject_get(&ca->kobj); @@ -1918,14 +1918,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto out; } - pr_info("registered cache device %s", bdevname(bdev, name)); + pr_info("registered cache device %s\n", bdevname(bdev, name)); out: kobject_put(&ca->kobj); err: if (err) - pr_notice("error opening %s: %s", bdevname(bdev, name), err); + pr_notice("error opening %s: %s\n", bdevname(bdev, name), err); return ret; } @@ -2041,7 +2041,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, err_close: blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); err: - pr_info("error opening %s: %s", path, err); + pr_info("error opening %s: %s\n", path, err); ret = -EINVAL; goto out; } @@ -2064,7 +2064,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) list_empty(&uncached_devices)) goto out; - pr_info("Stopping all devices:"); + pr_info("Stopping all devices:\n"); list_for_each_entry_safe(c, tc, &bch_cache_sets, list) bch_cache_set_stop(c); @@ -2093,9 +2093,9 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) finish_wait(&unregister_wait, &wait); if (stopped) - pr_info("All devices stopped"); + pr_info("All devices stopped\n"); else - pr_notice("Timeout waiting for devices to be closed"); + pr_notice("Timeout waiting for devices to be closed\n"); out: mutex_unlock(&bch_register_lock); } diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 78cd7bd..4eae367 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -284,7 +284,7 @@ return size; } - pr_err("Can't attach %s: cache set not found", buf); + pr_err("Can't attach %s: cache set not found\n", buf); return v; } -- 1.8.3.1 -- To unsubscribe from this list: send the line "unsubscribe linux-bcache" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html