Update bcachefs sources to 70fa0c1ff4 fixup! bcachefs: Btree key cache improvements
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 23 Oct 2022 21:39:22 +0000 (17:39 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 23 Oct 2022 22:24:08 +0000 (18:24 -0400)
41 files changed:
.bcachefs_revision
include/linux/rcupdate.h
include/trace/events/bcachefs.h
libbcachefs/alloc_background.h
libbcachefs/backpointers.c
libbcachefs/backpointers.h
libbcachefs/bcachefs.h
libbcachefs/bcachefs_format.h
libbcachefs/bcachefs_ioctl.h
libbcachefs/bkey.c
libbcachefs/bkey_methods.c
libbcachefs/bkey_methods.h
libbcachefs/btree_cache.c
libbcachefs/btree_gc.c
libbcachefs/btree_io.c
libbcachefs/btree_iter.c
libbcachefs/btree_key_cache.c
libbcachefs/btree_key_cache.h
libbcachefs/btree_locking.h
libbcachefs/btree_types.h
libbcachefs/btree_update_interior.c
libbcachefs/buckets.c
libbcachefs/checksum.h
libbcachefs/dirent.h
libbcachefs/ec.h
libbcachefs/errcode.c
libbcachefs/extents.h
libbcachefs/fifo.h
libbcachefs/fsck.c
libbcachefs/inode.h
libbcachefs/journal.h
libbcachefs/lru.h
libbcachefs/quota.h
libbcachefs/recovery.c
libbcachefs/reflink.h
libbcachefs/replicas_types.h
libbcachefs/subvolume.c
libbcachefs/subvolume.h
libbcachefs/sysfs.c
libbcachefs/tests.c
libbcachefs/xattr.h

index e8327f59bd4e74630451354eb27658a27853cda6..206b9d4d94ab0203a763319ff6b2af4799ef51a6 100644 (file)
@@ -1 +1 @@
-cd779e0cc51cb232d17eec4537cb4769af202b5f
+70fa0c1ff48feba041a8243b1a52ee57cffe1e0e
index ae292241c82c5f8ce361443d1f53a3e91e4737a0..3db40cb49cb39b41d5f6e3c60fd7fdcb428c53e5 100644 (file)
@@ -4,6 +4,8 @@
 #include <urcu.h>
 #include <linux/compiler.h>
 
+#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
+
 #define rcu_dereference_check(p, c)    rcu_dereference(p)
 #define rcu_dereference_raw(p)         rcu_dereference(p)
 #define rcu_dereference_protected(p, c)        rcu_dereference(p)
index d3d9e965e7020efb96b4eff9a03f89a19d3a88f6..444f43f0474d90ef736464b0ef37f1f4bd656f1c 100644 (file)
@@ -354,7 +354,7 @@ TRACE_EVENT(btree_reserve_get_fail,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->required       = required;
        ),
@@ -411,7 +411,7 @@ TRACE_EVENT(btree_path_relock_fail,
        TP_fast_assign(
                struct btree *b = btree_path_node(path, level);
 
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = path->level;
@@ -462,7 +462,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
        TP_fast_assign(
                struct six_lock_count c;
 
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = level;
@@ -524,7 +524,7 @@ TRACE_EVENT(bucket_alloc,
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->user           = user;
                __entry->bucket         = bucket;
        ),
@@ -568,7 +568,7 @@ TRACE_EVENT(bucket_alloc_fail,
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->free           = free;
                __entry->avail          = avail;
                __entry->copygc_wait_amount     = copygc_wait_amount;
@@ -578,7 +578,7 @@ TRACE_EVENT(bucket_alloc_fail,
                __entry->need_journal_commit = need_journal_commit;
                __entry->nouse          = nouse;
                __entry->nonblocking    = nonblocking;
-               strlcpy(__entry->err, err, sizeof(__entry->err));
+               strscpy(__entry->err, err, sizeof(__entry->err));
        ),
 
        TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
@@ -616,7 +616,7 @@ TRACE_EVENT(discard_buckets,
                __entry->open                   = open;
                __entry->need_journal_commit    = need_journal_commit;
                __entry->discarded              = discarded;
-               strlcpy(__entry->err, err, sizeof(__entry->err));
+               strscpy(__entry->err, err, sizeof(__entry->err));
        ),
 
        TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
@@ -766,7 +766,7 @@ DECLARE_EVENT_CLASS(transaction_event,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
        ),
 
@@ -811,7 +811,7 @@ TRACE_EVENT(trans_restart_journal_preres_get,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->flags                  = flags;
        ),
@@ -871,7 +871,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos)
@@ -918,7 +918,7 @@ TRACE_EVENT(trans_restart_upgrade,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
@@ -1027,7 +1027,7 @@ TRACE_EVENT(trans_restart_would_deadlock_write,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
        ),
 
        TP_printk("%s", __entry->trans_fn)
@@ -1046,7 +1046,7 @@ TRACE_EVENT(trans_restart_mem_realloced,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->bytes          = bytes;
        ),
@@ -1075,7 +1075,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
 
                __entry->btree_id       = path->btree_id;
index 044bc72992d4186d551da21ba63defa09334da5e..ee683bdde956c7b08268feaf9d2bfe019d7b6ba8 100644 (file)
@@ -103,34 +103,34 @@ int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c, int, struct pr
 void bch2_alloc_v4_swab(struct bkey_s);
 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_alloc (struct bkey_ops) {                \
+#define bch2_bkey_ops_alloc ((struct bkey_ops) {       \
        .key_invalid    = bch2_alloc_v1_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v2_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v3_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v4_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .swab           = bch2_alloc_v4_swab,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
 static inline bool bkey_is_alloc(const struct bkey *k)
 {
index d74de1df7aa3433a9f2c284e96ab66bc9086f5db..dace68e208b670aa05cc3a4400155cac4a385967 100644 (file)
@@ -984,9 +984,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
                        break;
 
                if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
-                       bch_verbose(c, "check_extents_to_backpointers(): alloc info does not fit in ram,"
-                                   "running in multiple passes with %zu nodes per pass",
-                                   btree_nodes_fit_in_ram(c));
+                       bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+                                   __func__, btree_nodes_fit_in_ram(c));
 
                if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;
@@ -1099,9 +1098,8 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 
                if (!bbpos_cmp(start, BBPOS_MIN) &&
                    bbpos_cmp(end, BBPOS_MAX))
-                       bch_verbose(c, "check_backpointers_to_extents(): extents do not fit in ram,"
-                                   "running in multiple passes with %zu nodes per pass",
-                                   btree_nodes_fit_in_ram(c));
+                       bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+                                   __func__, btree_nodes_fit_in_ram(c));
 
                if (bbpos_cmp(start, BBPOS_MIN) ||
                    bbpos_cmp(end, BBPOS_MAX)) {
index 1c97e364c53200170d89485752124c6c15bd420b..8c58f9298280121ee221a1f0411ef85514ddcb8a 100644 (file)
@@ -10,11 +10,11 @@ void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *)
 void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 void bch2_backpointer_swab(struct bkey_s);
 
-#define bch2_bkey_ops_backpointer (struct bkey_ops) {  \
+#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
        .key_invalid    = bch2_backpointer_invalid,     \
        .val_to_text    = bch2_backpointer_k_to_text,   \
        .swab           = bch2_backpointer_swab,        \
-}
+})
 
 void bch2_extent_ptr_to_bp(struct bch_fs *, enum btree_id, unsigned,
                           struct bkey_s_c, struct extent_ptr_decoded,
index 33186fa82682e037af78dcb3543d5bbf3049e26b..fcbe8f8c1acf6664cf1132ada4c07dbe00e1d97b 100644 (file)
@@ -282,7 +282,7 @@ do {                                                                        \
                "When reading btree nodes, read all replicas and "      \
                "compare them")
 
-/* Parameters that should only be compiled in in debug mode: */
+/* Parameters that should only be compiled in debug mode: */
 #define BCH_DEBUG_PARAMS_DEBUG()                                       \
        BCH_DEBUG_PARAM(expensive_debug_checks,                         \
                "Enables various runtime debugging checks that "        \
index bfcb75a361cb4c1edf231ca0fce8ae235dad2984..5da9f3a4d47d9ea9131a0bb92528ce019f498bfd 100644 (file)
@@ -147,7 +147,7 @@ struct bpos {
 #else
 #error edit for your odd byteorder.
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 #define KEY_INODE_MAX                  ((__u64)~0ULL)
 #define KEY_OFFSET_MAX                 ((__u64)~0ULL)
@@ -181,7 +181,7 @@ struct bversion {
        __u32           hi;
        __u64           lo;
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 struct bkey {
        /* Size of combined key and value, in u64s */
@@ -214,7 +214,7 @@ struct bkey {
 
        __u8            pad[1];
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bkey_packed {
        __u64           _data[0];
@@ -248,7 +248,7 @@ struct bkey_packed {
         * to the same size as struct bkey should hopefully be safest.
         */
        __u8            pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BKEY_U64s                      (sizeof(struct bkey) / sizeof(__u64))
 #define BKEY_U64s_MAX                  U8_MAX
@@ -478,7 +478,7 @@ struct bch_set {
 struct bch_csum {
        __le64                  lo;
        __le64                  hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_EXTENT_ENTRY_TYPES()               \
        x(ptr,                  0)              \
@@ -515,7 +515,7 @@ struct bch_extent_crc32 {
                                _compressed_size:7,
                                type:2;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC32_SIZE_MAX         (1U << 7)
 #define CRC32_NONCE_MAX                0
@@ -541,7 +541,7 @@ struct bch_extent_crc64 {
                                type:3;
 #endif
        __u64                   csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC64_SIZE_MAX         (1U << 9)
 #define CRC64_NONCE_MAX                ((1U << 10) - 1)
@@ -565,7 +565,7 @@ struct bch_extent_crc128 {
                                type:4;
 #endif
        struct bch_csum         csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC128_SIZE_MAX                (1U << 13)
 #define CRC128_NONCE_MAX       ((1U << 13) - 1)
@@ -591,7 +591,7 @@ struct bch_extent_ptr {
                                cached:1,
                                type:1;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_extent_stripe_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -643,7 +643,7 @@ struct bch_btree_ptr {
 
        __u64                   _data[0];
        struct bch_extent_ptr   start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_btree_ptr_v2 {
        struct bch_val          v;
@@ -655,7 +655,7 @@ struct bch_btree_ptr_v2 {
        struct bpos             min_key;
        __u64                   _data[0];
        struct bch_extent_ptr   start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
 
@@ -664,7 +664,7 @@ struct bch_extent {
 
        __u64                   _data[0];
        union bch_extent_entry  start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_reservation {
        struct bch_val          v;
@@ -672,7 +672,7 @@ struct bch_reservation {
        __le32                  generation;
        __u8                    nr_replicas;
        __u8                    pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Maximum size (in u64s) a single pointer could be: */
 #define BKEY_EXTENT_PTR_U64s_MAX\
@@ -706,7 +706,7 @@ struct bch_inode {
        __le32                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_inode_v2 {
        struct bch_val          v;
@@ -716,7 +716,7 @@ struct bch_inode_v2 {
        __le64                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_inode_v3 {
        struct bch_val          v;
@@ -728,7 +728,7 @@ struct bch_inode_v3 {
        __le64                  bi_size;
        __le64                  bi_version;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define INODEv3_FIELDS_START_INITIAL   6
 #define INODEv3_FIELDS_START_CUR       (offsetof(struct bch_inode_v3, fields) / sizeof(u64))
@@ -738,7 +738,7 @@ struct bch_inode_generation {
 
        __le32                  bi_generation;
        __le32                  pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /*
  * bi_subvol and bi_parent_subvol are only set for subvolume roots:
@@ -891,7 +891,7 @@ struct bch_dirent {
        __u8                    d_type;
 
        __u8                    d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define DT_SUBVOL      16
 #define BCH_DT_MAX     17
@@ -914,7 +914,7 @@ struct bch_xattr {
        __u8                    x_name_len;
        __le16                  x_val_len;
        __u8                    x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Bucket/allocation information: */
 
@@ -923,7 +923,7 @@ struct bch_alloc {
        __u8                    fields;
        __u8                    gen;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_FIELDS_V1()                  \
        x(read_time,            16)             \
@@ -948,7 +948,7 @@ struct bch_alloc_v2 {
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_FIELDS_V2()                  \
        x(read_time,            64)             \
@@ -967,7 +967,7 @@ struct bch_alloc_v3 {
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
 LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
@@ -985,7 +985,7 @@ struct bch_alloc_v4 {
        __u64                   io_time[2];
        __u32                   stripe;
        __u32                   nr_external_backpointers;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_V4_U64s_V0   6
 #define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(u64))
@@ -1005,7 +1005,7 @@ struct bch_backpointer {
        __u64                   bucket_offset:40;
        __u32                   bucket_len;
        struct bpos             pos;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Quotas: */
 
@@ -1030,7 +1030,7 @@ struct bch_quota_counter {
 struct bch_quota {
        struct bch_val          v;
        struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Erasure coding */
 
@@ -1046,7 +1046,7 @@ struct bch_stripe {
        __u8                    pad;
 
        struct bch_extent_ptr   ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Reflink: */
 
@@ -1063,14 +1063,14 @@ struct bch_reflink_p {
         */
        __le32                  front_pad;
        __le32                  back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_reflink_v {
        struct bch_val          v;
        __le64                  refcount;
        union bch_extent_entry  start[0];
        __u64                   _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_indirect_inline_data {
        struct bch_val          v;
@@ -1127,7 +1127,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags,  1,  2)
 struct bch_lru {
        struct bch_val          v;
        __le64                  idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define LRU_ID_STRIPES         (1U << 16)
 
@@ -1326,19 +1326,19 @@ struct bch_replicas_entry_v0 {
        __u8                    data_type;
        __u8                    nr_devs;
        __u8                    devs[];
-} __attribute__((packed));
+} __packed;
 
 struct bch_sb_field_replicas_v0 {
        struct bch_sb_field     field;
        struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_replicas_entry {
        __u8                    data_type;
        __u8                    nr_devs;
        __u8                    nr_required;
        __u8                    devs[];
-} __attribute__((packed));
+} __packed;
 
 #define replicas_entry_bytes(_i)                                       \
        (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
@@ -1346,7 +1346,7 @@ struct bch_replicas_entry {
 struct bch_sb_field_replicas {
        struct bch_sb_field     field;
        struct bch_replicas_entry entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_quota: */
 
@@ -1363,7 +1363,7 @@ struct bch_sb_quota_type {
 struct bch_sb_field_quota {
        struct bch_sb_field             field;
        struct bch_sb_quota_type        q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_disk_groups: */
 
@@ -1372,7 +1372,7 @@ struct bch_sb_field_quota {
 struct bch_disk_group {
        __u8                    label[BCH_SB_LABEL_SIZE];
        __le64                  flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
@@ -1381,7 +1381,7 @@ LE64_BITMASK(BCH_GROUP_PARENT,            struct bch_disk_group, flags[0], 6, 24)
 struct bch_sb_field_disk_groups {
        struct bch_sb_field     field;
        struct bch_disk_group   entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_counters */
 
@@ -1565,7 +1565,7 @@ struct bch_sb_layout {
        __u8                    nr_superblocks;
        __u8                    pad[5];
        __le64                  sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_SB_LAYOUT_SECTOR   7
 
@@ -1616,7 +1616,7 @@ struct bch_sb {
                struct bch_sb_field start[0];
                __le64          _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /*
  * Flags:
@@ -1899,6 +1899,7 @@ enum bch_compression_opts {
 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
 {
        __le64 ret;
+
        memcpy(&ret, &sb->uuid, sizeof(ret));
        return ret;
 }
@@ -1973,26 +1974,26 @@ enum {
 struct jset_entry_usage {
        struct jset_entry       entry;
        __le64                  v;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_data_usage {
        struct jset_entry       entry;
        __le64                  v;
        struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_clock {
        struct jset_entry       entry;
        __u8                    rw;
        __u8                    pad[7];
        __le64                  time;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_dev_usage_type {
        __le64                  buckets;
        __le64                  sectors;
        __le64                  fragmented;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_dev_usage {
        struct jset_entry       entry;
@@ -2003,7 +2004,7 @@ struct jset_entry_dev_usage {
        __le64                  _buckets_unavailable; /* No longer used */
 
        struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
 
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
@@ -2014,7 +2015,7 @@ static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage
 struct jset_entry_log {
        struct jset_entry       entry;
        u8                      d[];
-} __attribute__((packed));
+} __packed;
 
 /*
  * On disk format for a journal entry:
@@ -2049,7 +2050,7 @@ struct jset {
                struct jset_entry start[0];
                __u64           _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(JSET_CSUM_TYPE,   struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN,  struct jset, flags, 4, 5);
@@ -2112,7 +2113,7 @@ struct bset {
                struct bkey_packed start[0];
                __u64           _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(BSET_CSUM_TYPE,   struct bset, flags, 0, 4);
 
@@ -2145,7 +2146,7 @@ struct btree_node {
 
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE64_BITMASK(BTREE_NODE_ID,    struct btree_node, flags,  0,  4);
 LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags,  4,  8);
@@ -2166,6 +2167,6 @@ struct btree_node_entry {
 
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #endif /* _BCACHEFS_FORMAT_H */
index b2edabf58260d4ea1e312aae44cfee68b25fa810..ad47a506a907651bf1ead3bfa0c29c3efb04cc05 100644 (file)
@@ -208,7 +208,7 @@ struct bch_ioctl_data {
                __u64           pad[8];
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 enum bch_data_event {
        BCH_DATA_EVENT_PROGRESS = 0,
@@ -224,7 +224,7 @@ struct bch_ioctl_data_progress {
 
        __u64                   sectors_done;
        __u64                   sectors_total;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_ioctl_data_event {
        __u8                    type;
@@ -233,12 +233,12 @@ struct bch_ioctl_data_event {
        struct bch_ioctl_data_progress p;
        __u64                   pad2[15];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_replicas_usage {
        __u64                   sectors;
        struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 
 static inline struct bch_replicas_usage *
 replicas_usage_next(struct bch_replicas_usage *u)
index f7e5d0c377eb0cc6443244de8110c2d6423c752f..630df060fbe934b5ff8d936eed91b13b792f8f81 100644 (file)
@@ -17,9 +17,6 @@
 
 const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
 
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
-                             const struct bkey_packed *);
-
 void bch2_bkey_packed_to_binary_text(struct printbuf *out,
                                     const struct bkey_format *f,
                                     const struct bkey_packed *k)
index 14d910a3077ffa49a6522423563bc1de0df632fc..6939d74d705e00b05dee159b3ae1969b3882c6cb 100644 (file)
@@ -29,13 +29,13 @@ static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_deleted (struct bkey_ops) {      \
+#define bch2_bkey_ops_deleted ((struct bkey_ops) {     \
        .key_invalid = deleted_key_invalid,             \
-}
+})
 
-#define bch2_bkey_ops_whiteout (struct bkey_ops) {     \
+#define bch2_bkey_ops_whiteout ((struct bkey_ops) {    \
        .key_invalid = deleted_key_invalid,             \
-}
+})
 
 static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                 int rw, struct printbuf *err)
@@ -49,9 +49,9 @@ static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_error (struct bkey_ops) {                \
+#define bch2_bkey_ops_error ((struct bkey_ops) {       \
        .key_invalid = empty_val_key_invalid,           \
-}
+})
 
 static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                   int rw, struct printbuf *err)
@@ -65,13 +65,13 @@ static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_cookie (struct bkey_ops) {       \
+#define bch2_bkey_ops_cookie ((struct bkey_ops) {      \
        .key_invalid = key_type_cookie_invalid,         \
-}
+})
 
-#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) {        \
+#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
        .key_invalid = empty_val_key_invalid,           \
-}
+})
 
 static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                        int rw, struct printbuf *err)
@@ -89,10 +89,10 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
               datalen, min(datalen, 32U), d.v->data);
 }
 
-#define bch2_bkey_ops_inline_data (struct bkey_ops) {  \
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
        .key_invalid    = key_type_inline_data_invalid, \
        .val_to_text    = key_type_inline_data_to_text, \
-}
+})
 
 static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                int rw, struct printbuf *err)
@@ -112,10 +112,10 @@ static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_
        return true;
 }
 
-#define bch2_bkey_ops_set (struct bkey_ops) {          \
+#define bch2_bkey_ops_set ((struct bkey_ops) {         \
        .key_invalid    = key_type_set_invalid,         \
        .key_merge      = key_type_set_merge,           \
-}
+})
 
 const struct bkey_ops bch2_bkey_ops[] = {
 #define x(name, nr) [KEY_TYPE_##name]  = bch2_bkey_ops_##name,
@@ -440,6 +440,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
                    btree_id == BTREE_ID_inodes) {
                        if (!bkey_packed(k)) {
                                struct bkey_i *u = packed_to_bkey(k);
+
                                swap(u->k.p.inode, u->k.p.offset);
                        } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
                                   f->bits_per_field[BKEY_FIELD_OFFSET]) {
index db894b40d2ca4180e1e91f398cc3c7021fc68491..4739b3c32cff6df783dfcf16e6d219d310f08f28 100644 (file)
@@ -18,7 +18,7 @@ extern const char * const bch2_bkey_types[];
  *
  * When invalid, error string is returned via @err. @rw indicates whether key is
  * being read or written; more aggressive checks can be enabled when rw == WRITE.
-*/
+ */
 struct bkey_ops {
        int             (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
                                       int rw, struct printbuf *err);
index 8dd2db4121a6b57db2a2c20fa4c9f3e0ad781428..d1cbf9266fb4ebfb065cda1475179d352f31816d 100644 (file)
@@ -118,7 +118,9 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
 static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 {
-       struct btree *b = kzalloc(sizeof(struct btree), gfp);
+       struct btree *b;
+
+       b = kzalloc(sizeof(struct btree), gfp);
        if (!b)
                return NULL;
 
@@ -136,7 +138,9 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 {
        struct btree_cache *bc = &c->btree_cache;
-       struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL);
+       struct btree *b;
+
+       b = __btree_node_mem_alloc(c, GFP_KERNEL);
        if (!b)
                return NULL;
 
@@ -155,6 +159,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 {
        int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
        BUG_ON(ret);
 
        /* Cause future lookups for this node to fail: */
index 801a09f6fc1141f595cfc89b2cc4b151fbb85978..20e804ecb104727bc3d6bccfc8ae59cc5c5d407c 100644 (file)
@@ -199,7 +199,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
        struct bkey_i_btree_ptr_v2 *new;
        int ret;
 
-       new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+       new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -228,7 +228,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
        if (ret)
                return ret;
 
-       new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+       new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
index dd6b536ced6a918ebd1152da352d05609c9658a6..90f67ccd5e64c72cdde3cdf81106e7da9e6951f9 100644 (file)
@@ -1216,6 +1216,7 @@ static void btree_node_read_endio(struct bio *bio)
 
        if (rb->have_ioref) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
                bch2_latency_acct(ca, rb->start_time, READ);
        }
 
@@ -1403,6 +1404,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
 
        if (rb->have_ioref) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
                bch2_latency_acct(ca, rb->start_time, READ);
        }
 
index d18346a5d58d066bafd86509c2aeeb61d6ab41ed..da116153aa47686aec3218316ece721194cc5c7f 100644 (file)
@@ -214,6 +214,7 @@ err:
 
        if (p) {
                struct bkey uk = bkey_unpack_key(l->b, p);
+
                bch2_bkey_to_text(&buf2, &uk);
        } else {
                prt_printf(&buf2, "(none)");
@@ -221,6 +222,7 @@ err:
 
        if (k) {
                struct bkey uk = bkey_unpack_key(l->b, k);
+
                bch2_bkey_to_text(&buf3, &uk);
        } else {
                prt_printf(&buf3, "(none)");
@@ -1801,7 +1803,8 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
        if (bpos_cmp(start_pos, iter->journal_pos) < 0)
                iter->journal_idx = 0;
 
-       k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
+       k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
+                                       iter->path->level,
                                        start_pos, end_pos,
                                        &iter->journal_idx);
 
@@ -1823,7 +1826,7 @@ struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
 {
        struct bkey_i *next_journal =
                bch2_btree_journal_peek(trans, iter, iter->path->pos,
-                               k.k ? k.k->p : iter->path->l[0].b->key.k.p);
+                               k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
 
        if (next_journal) {
                iter->k = next_journal->k;
@@ -2902,7 +2905,7 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
        bch2_trans_alloc_paths(trans, c);
 
        s = btree_trans_stats(trans);
-       if (s) {
+       if (s && s->max_mem) {
                unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
 
                trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
@@ -2913,9 +2916,9 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
                } else {
                        trans->mem_bytes = expected_mem_bytes;
                }
-
-               trans->nr_max_paths = s->nr_max_paths;
        }
+       if (s)
+               trans->nr_max_paths = s->nr_max_paths;
 
        trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
 
index cd52dd5a2890e44263f7f2925bddb8951cbcba8b..66fb69801318d1a60fcf394cd9a6af40727b9d1f 100644 (file)
@@ -104,6 +104,7 @@ static void bkey_cached_free(struct btree_key_cache *bc,
        six_unlock_intent(&ck->c.lock);
 }
 
+#ifdef __KERNEL__
 static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
                                                   struct bkey_cached *ck)
 {
@@ -119,17 +120,18 @@ static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
 
        list_move(&ck->list, &bc->freed_nonpcpu);
 }
+#endif
 
 static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
                                         struct bkey_cached *ck)
 {
-       struct btree_key_cache_freelist *f;
-       bool freed = false;
-
        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
 
        if (!ck->c.lock.readers) {
 #ifdef __KERNEL__
+               struct btree_key_cache_freelist *f;
+               bool freed = false;
+
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
 
@@ -193,11 +195,12 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck = NULL;
-       struct btree_key_cache_freelist *f;
        bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
 
        if (!pcpu_readers) {
 #ifdef __KERNEL__
+               struct btree_key_cache_freelist *f;
+
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
                if (f->nr)
index 670746e72dabae9cb3d56a5cbe69360ca4fdbe7d..eccea15fca792614eb0e9fe782224016cea7df28 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_BTREE_KEY_CACHE_H
 #define _BCACHEFS_BTREE_KEY_CACHE_H
 
index bf8d1880673b224a09390e911dc50dfff03bb3b5..fb237c95ee13cb5d91e606207461fbfde5c7e2b7 100644 (file)
@@ -296,6 +296,7 @@ static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
                                              struct btree_bkey_cached_common *b)
 {
        int ret = __btree_node_lock_write(trans, path, b, true);
+
        BUG_ON(ret);
 }
 
index 892d1231755164dede6deafd9b61ad65148d9919..cab3de0def4e3407d27914a0f29384072ac7c6d2 100644 (file)
@@ -326,7 +326,7 @@ struct btree_key_cache {
 struct bkey_cached_key {
        u32                     btree_id;
        struct bpos             pos;
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 #define BKEY_CACHED_ACCESSED           0
 #define BKEY_CACHED_DIRTY              1
index 40debf7563f8d5fdecd6185c3823d7dfda72b1cd..5ce91ae6a69e800f9a839701c1ad7fc98c83f966 100644 (file)
@@ -665,7 +665,7 @@ static void btree_update_nodes_written(struct btree_update *as)
        bch2_trans_unlock(&trans);
 
        bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
-                            "error %i in btree_update_nodes_written()", ret);
+                            "%s(): error %s", __func__, bch2_err_str(ret));
 err:
        if (as->b) {
                struct btree_path *path;
@@ -1838,10 +1838,10 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
                bch2_bpos_to_text(&buf1, prev->data->max_key);
                bch2_bpos_to_text(&buf2, next->data->min_key);
                bch_err(c,
-                       "btree topology error in btree merge:\n"
+                       "%s(): btree topology error:\n"
                        "  prev ends at   %s\n"
                        "  next starts at %s",
-                       buf1.buf, buf2.buf);
+                       __func__, buf1.buf, buf2.buf);
                printbuf_exit(&buf1);
                printbuf_exit(&buf2);
                bch2_topology_error(c);
index 116711fc01fb30f501ad206fb9c6a7f70362ca3f..cd297941e6b6f106ff00cda84c1e57b1c20b2295 100644 (file)
@@ -575,7 +575,8 @@ int bch2_mark_alloc(struct btree_trans *trans,
                                            -((s64) old_a.cached_sectors),
                                            journal_seq, gc);
                if (ret) {
-                       bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
+                       bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+                                           __func__);
                        return ret;
                }
        }
@@ -959,7 +960,8 @@ int bch2_mark_extent(struct btree_trans *trans,
                                ret = update_cached_sectors(c, k, p.ptr.dev,
                                                disk_sectors, journal_seq, true);
                                if (ret) {
-                                       bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
+                                       bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+                                                           __func__);
                                        return ret;
                                }
                        }
@@ -987,7 +989,7 @@ int bch2_mark_extent(struct btree_trans *trans,
                        struct printbuf buf = PRINTBUF;
 
                        bch2_bkey_val_to_text(&buf, c, k);
-                       bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+                       bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
                        printbuf_exit(&buf);
                        return ret;
                }
index c86c3c05d62054a66faffa4c5c2cde81ab5de1e8..3d6d13bcfd72ec1dd2683eac57dbe11d5028655b 100644 (file)
@@ -78,15 +78,15 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
 {
        switch (type) {
        case BCH_CSUM_OPT_none:
-            return BCH_CSUM_none;
+               return BCH_CSUM_none;
        case BCH_CSUM_OPT_crc32c:
-            return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
+               return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
        case BCH_CSUM_OPT_crc64:
-            return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
+               return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
        case BCH_CSUM_OPT_xxhash:
-            return BCH_CSUM_xxhash;
+               return BCH_CSUM_xxhash;
        default:
-            BUG();
+               BUG();
        }
 }
 
index b1466932c76873c2d326f9d87507195e26fcb769..1a2c9108f864ee5eee32472ef4b133f1030f8786 100644 (file)
@@ -9,10 +9,10 @@ extern const struct bch_hash_desc bch2_dirent_hash_desc;
 int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_dirent (struct bkey_ops) {       \
+#define bch2_bkey_ops_dirent ((struct bkey_ops) {      \
        .key_invalid    = bch2_dirent_invalid,          \
        .val_to_text    = bch2_dirent_to_text,          \
-}
+})
 
 struct qstr;
 struct file;
index 3e2b22c00a3e7adf19f166b1254452c79af001d7..aba1e82bc889f7e7c6d878d7831fb0dc0c99b20a 100644 (file)
@@ -10,13 +10,13 @@ int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
 void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
                         struct bkey_s_c);
 
-#define bch2_bkey_ops_stripe (struct bkey_ops) {       \
+#define bch2_bkey_ops_stripe ((struct bkey_ops) {      \
        .key_invalid    = bch2_stripe_invalid,          \
        .val_to_text    = bch2_stripe_to_text,          \
        .swab           = bch2_ptr_swab,                \
        .trans_trigger  = bch2_trans_mark_stripe,       \
        .atomic_trigger = bch2_mark_stripe,             \
-}
+})
 
 static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
 {
index cc9ce0be356e20931a44df8e6a0ce0944be799a0..dc906fc9176fecf9f8a7240ff78780a742367735 100644 (file)
@@ -23,6 +23,7 @@ static unsigned bch2_errcode_parents[] = {
 const char *bch2_err_str(int err)
 {
        const char *errstr;
+
        err = abs(err);
 
        BUG_ON(err >= BCH_ERR_MAX);
index 3c17b81130bbfbdba9130dc3c224597115e4cf4d..224df17206cb4f77c3f40c5665de5aba51947aec 100644 (file)
@@ -198,6 +198,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
@@ -205,6 +206,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+
                return (struct bkey_ptrs_c) {
                        e.v->start,
                        extent_entry_last(e)
@@ -212,6 +214,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_stripe: {
                struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&s.v->ptrs[0]),
                        to_entry(&s.v->ptrs[s.v->nr_blocks]),
@@ -227,6 +230,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_btree_ptr_v2: {
                struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
@@ -342,7 +346,7 @@ out:                                                                        \
 
 #define extent_for_each_entry_from(_e, _entry, _start)                 \
        __bkey_extent_entry_for_each_from(_start,                       \
-                               extent_entry_last(_e),_entry)
+                               extent_entry_last(_e), _entry)
 
 #define extent_for_each_entry(_e, _entry)                              \
        extent_for_each_entry_from(_e, _entry, (_e).v->start)
@@ -376,28 +380,28 @@ void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s
 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
                              int, struct bkey_s);
 
-#define bch2_bkey_ops_btree_ptr (struct bkey_ops) {            \
+#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {           \
        .key_invalid    = bch2_btree_ptr_invalid,               \
        .val_to_text    = bch2_btree_ptr_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
-#define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) {         \
+#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {                \
        .key_invalid    = bch2_btree_ptr_v2_invalid,            \
        .val_to_text    = bch2_btree_ptr_v2_to_text,            \
        .swab           = bch2_ptr_swab,                        \
        .compat         = bch2_btree_ptr_v2_compat,             \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 /* KEY_TYPE_extent: */
 
 bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_extent (struct bkey_ops) {               \
+#define bch2_bkey_ops_extent ((struct bkey_ops) {              \
        .key_invalid    = bch2_bkey_ptrs_invalid,               \
        .val_to_text    = bch2_bkey_ptrs_to_text,               \
        .swab           = bch2_ptr_swab,                        \
@@ -405,7 +409,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
        .key_merge      = bch2_extent_merge,                    \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 /* KEY_TYPE_reservation: */
 
@@ -414,13 +418,13 @@ int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
 void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_reservation (struct bkey_ops) {          \
+#define bch2_bkey_ops_reservation ((struct bkey_ops) {         \
        .key_invalid    = bch2_reservation_invalid,             \
        .val_to_text    = bch2_reservation_to_text,             \
        .key_merge      = bch2_reservation_merge,               \
        .trans_trigger  = bch2_trans_mark_reservation,          \
        .atomic_trigger = bch2_mark_reservation,                \
-}
+})
 
 /* Extent checksum entries: */
 
index cdb272708a4bdacf94093a7c0351570189abf973..66b945be10c2309a9e758b228b146047b20674e2 100644 (file)
@@ -65,7 +65,7 @@ do {                                                                  \
           (((p) - (fifo)->data)))
 
 #define fifo_entry_idx(fifo, p)        (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
-#define fifo_idx_entry(fifo, i)        (fifo)->data[((fifo)->front + (i)) & (fifo)->mask]
+#define fifo_idx_entry(fifo, i)        ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
 
 #define fifo_push_back_ref(f)                                          \
        (fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
index ca95d85b73488ef849c54ab26e044d72a7e76e32..f4f0e0cec85d7c536fa8c242fed641dcf801fb5e 100644 (file)
@@ -321,7 +321,7 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
        bch2_trans_iter_exit(trans, &iter);
 err:
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from __remove_dirent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -506,7 +506,7 @@ static int snapshots_seen_add(struct bch_fs *c, struct snapshots_seen *s, u32 id
                        break;
 
                if (i->equiv == n.equiv) {
-                       bch_err(c, "adding duplicate snapshot in snapshots_seen_add()");
+                       bch_err(c, "%s(): adding duplicate snapshot", __func__);
                        return -EINVAL;
                }
        }
@@ -848,8 +848,7 @@ out:
        printbuf_exit(&buf);
        return ret;
 bad_hash:
-       if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, "
-                    "hashed to %llu\n%s",
+       if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
                     bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
                     (printbuf_reset(&buf),
                      bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
@@ -1000,7 +999,7 @@ static int check_inode(struct btree_trans *trans,
 err:
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_inode(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1026,7 +1025,7 @@ static int check_inodes(struct bch_fs *c, bool full)
        bch2_trans_exit(&trans);
        snapshots_seen_exit(&s);
        if (ret)
-               bch_err(c, "error from check_inodes(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1159,7 +1158,7 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
        }
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_i_sectors(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        if (!ret && trans_was_restarted(trans, restart_count))
                ret = -BCH_ERR_transaction_restart_nested;
        return ret;
@@ -1295,7 +1294,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_extent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1337,7 +1336,7 @@ static int check_extents(struct bch_fs *c)
        snapshots_seen_exit(&s);
 
        if (ret)
-               bch_err(c, "error from check_extents(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1376,7 +1375,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
        }
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_subdir_count(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        if (!ret && trans_was_restarted(trans, restart_count))
                ret = -BCH_ERR_transaction_restart_nested;
        return ret;
@@ -1497,7 +1496,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_target(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1667,7 +1666,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_dirent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1706,7 +1705,7 @@ static int check_dirents(struct bch_fs *c)
        inode_walker_exit(&target);
 
        if (ret)
-               bch_err(c, "error from check_dirents(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1742,7 +1741,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
        ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
 fsck_err:
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_xattr(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1774,7 +1773,7 @@ static int check_xattrs(struct bch_fs *c)
        bch2_trans_exit(&trans);
 
        if (ret)
-               bch_err(c, "error from check_xattrs(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
index 2915f4f96f4bb3c2fc8f2190ffad7fe86a8d2408..a9742bb63809b247299fe2e77361cb54aff58af6 100644 (file)
@@ -12,26 +12,26 @@ int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct pr
 int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_inode (struct bkey_ops) {                \
+#define bch2_bkey_ops_inode ((struct bkey_ops) {       \
        .key_invalid    = bch2_inode_invalid,           \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
-#define bch2_bkey_ops_inode_v2 (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_v2_invalid,        \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
-#define bch2_bkey_ops_inode_v3 (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_v3_invalid,        \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
 static inline bool bkey_is_inode(const struct bkey *k)
 {
@@ -44,10 +44,10 @@ int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c,
                                  int, struct printbuf *);
 void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_inode_generation (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_generation ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_generation_invalid,        \
        .val_to_text    = bch2_inode_generation_to_text,        \
-}
+})
 
 #if 0
 typedef struct {
@@ -78,7 +78,7 @@ struct bkey_inode_buf {
 #define x(_name, _bits)                + 8 + _bits / 8
        u8              _pad[0 + BCH_INODE_FIELDS_v3()];
 #undef  x
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
 int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
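The bch2_bkey_ops_* definitions in this header are C99 compound literals; with the added outer parentheses each macro expands to a single parenthesized expression, which presumably lets it be dropped into larger expressions without precedence surprises. A minimal, self-contained sketch of the same pattern, using hypothetical names rather than the real bcachefs types:

#include <stdio.h>

struct demo_ops {
        int (*key_invalid)(int key);
};

static int never_invalid(int key)
{
        (void) key;
        return 0;
}

/* A parenthesised C99 compound literal: the whole macro is one expression. */
#define demo_bkey_ops ((struct demo_ops) {              \
        .key_invalid    = never_invalid,                \
})

int main(void)
{
        /* Member access binds to the literal as a single unit. */
        printf("%d\n", demo_bkey_ops.key_invalid(42));
        return 0;
}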
index 9428f4233997b0072f7fd473cf45b8e6575b80d0..51d29a01b7b2c0d2979872a9cbdbc2bfd7b389c9 100644 (file)
@@ -29,8 +29,8 @@
  *
  * Synchronous updates are specified by passing a closure (@flush_cl) to
  * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will will wait on the journal
- * write to complete (via closure_wait()).
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
  *
  * If the index update wasn't synchronous, the journal entry will be
  * written out after 10 ms have elapsed, by default (the delay_ms field
index 3decb7b1dde23b9ac4cfa2e26af4fdd73f03e67d..925c29b49b867ae45d9b5baf08159e0fe9d53b53 100644 (file)
@@ -5,10 +5,10 @@
 int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_lru (struct bkey_ops) {  \
+#define bch2_bkey_ops_lru ((struct bkey_ops) { \
        .key_invalid    = bch2_lru_invalid,     \
        .val_to_text    = bch2_lru_to_text,     \
-}
+})
 
 int bch2_lru_delete(struct btree_trans *, u64, u64, u64, struct bkey_s_c);
 int bch2_lru_set(struct btree_trans *, u64, u64, u64 *);
index 8c67ae1da7c75806fff2ee4a22182bdd704799aa..59bed1148201c3dd73ea33672ec60af692880588 100644 (file)
@@ -10,10 +10,10 @@ extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
 int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_quota (struct bkey_ops) {                \
+#define bch2_bkey_ops_quota ((struct bkey_ops) {       \
        .key_invalid    = bch2_quota_invalid,           \
        .val_to_text    = bch2_quota_to_text,           \
-}
+})
 
 static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
 {
index ea7810a1797500c826ce1b40b162d7725e15d2b9..6968f934594fa1bda4e50ed53ac6853751c0e216 100644 (file)
@@ -1251,6 +1251,20 @@ use_clean:
                        goto err;
                bch_verbose(c, "done checking need_discard and freespace btrees");
 
+               if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+                       err = "error creating root snapshot node";
+                       ret = bch2_fs_initialize_subvolumes(c);
+                       if (ret)
+                               goto err;
+               }
+
+               bch_verbose(c, "reading snapshots table");
+               err = "error reading snapshots table";
+               ret = bch2_fs_snapshots_start(c);
+               if (ret)
+                       goto err;
+               bch_verbose(c, "reading snapshots done");
+
                set_bit(BCH_FS_MAY_GO_RW, &c->flags);
 
                bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
@@ -1299,7 +1313,6 @@ use_clean:
                bch_verbose(c, "done checking alloc to lru refs");
                set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
        } else {
-               set_bit(BCH_FS_MAY_GO_RW, &c->flags);
                set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
                set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
                set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
@@ -1309,6 +1322,22 @@ use_clean:
                if (c->opts.norecovery)
                        goto out;
 
+               if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+                       err = "error creating root snapshot node";
+                       ret = bch2_fs_initialize_subvolumes(c);
+                       if (ret)
+                               goto err;
+               }
+
+               bch_verbose(c, "reading snapshots table");
+               err = "error reading snapshots table";
+               ret = bch2_fs_snapshots_start(c);
+               if (ret)
+                       goto err;
+               bch_verbose(c, "reading snapshots done");
+
+               set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+
                bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
                err = "journal replay failed";
                ret = bch2_journal_replay(c);
@@ -1323,22 +1352,6 @@ use_clean:
        if (ret)
                goto err;
 
-       if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
-               bch2_fs_lazy_rw(c);
-
-               err = "error creating root snapshot node";
-               ret = bch2_fs_initialize_subvolumes(c);
-               if (ret)
-                       goto err;
-       }
-
-       bch_verbose(c, "reading snapshots table");
-       err = "error reading snapshots table";
-       ret = bch2_fs_snapshots_start(c);
-       if (ret)
-               goto err;
-       bch_verbose(c, "reading snapshots done");
-
        if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
                /* set bi_subvol on root inode */
                err = "error upgrade root inode for subvolumes";
@@ -1423,7 +1436,8 @@ out:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);
 
-       if (!c->opts.keep_journal) {
+       if (!c->opts.keep_journal &&
+           test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(c);
        }
@@ -1536,8 +1550,7 @@ int bch2_fs_initialize(struct bch_fs *c)
                goto err;
        bch_verbose(c, "reading snapshots done");
 
-       bch2_inode_init(c, &root_inode, 0, 0,
-                       S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
+       bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
        root_inode.bi_inum      = BCACHEFS_ROOT_INO;
        root_inode.bi_subvol    = BCACHEFS_ROOT_SUBVOL;
        bch2_inode_pack(&packed_inode, &root_inode);
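The root inode mode change above is purely a respelling: S_IRWXU (0700) | S_IRUGO (0444) | S_IXUGO (0111) equals 0755, so S_IFDIR|0755 is the same mode as before. A quick userspace check, spelling out the POSIX bits that the kernel-only S_IRUGO/S_IXUGO shorthands expand to:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
        /* S_IRWXU is 0700; S_IRUGO expands to S_IRUSR|S_IRGRP|S_IROTH (0444);
         * S_IXUGO expands to S_IXUSR|S_IXGRP|S_IXOTH (0111). OR'd together: 0755. */
        mode_t old_spelling = S_IRWXU
                | S_IRUSR | S_IRGRP | S_IROTH
                | S_IXUSR | S_IXGRP | S_IXOTH;

        assert(old_spelling == 0755);
        return 0;
}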
index f9848dc3eebbaeb770048d6c375d0a829a0f0d64..ce0012aa99c6a506a76aeb3132ceff05b6106e32 100644 (file)
@@ -8,13 +8,13 @@ void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
 bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_reflink_p (struct bkey_ops) {            \
+#define bch2_bkey_ops_reflink_p ((struct bkey_ops) {           \
        .key_invalid    = bch2_reflink_p_invalid,               \
        .val_to_text    = bch2_reflink_p_to_text,               \
        .key_merge      = bch2_reflink_p_merge,                 \
        .trans_trigger  = bch2_trans_mark_reflink_p,            \
        .atomic_trigger = bch2_mark_reflink_p,                  \
-}
+})
 
 int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
                           int, struct printbuf *);
@@ -23,13 +23,13 @@ void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
 int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
                              struct bkey_s_c, struct bkey_i *, unsigned);
 
-#define bch2_bkey_ops_reflink_v (struct bkey_ops) {            \
+#define bch2_bkey_ops_reflink_v ((struct bkey_ops) {           \
        .key_invalid    = bch2_reflink_v_invalid,               \
        .val_to_text    = bch2_reflink_v_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .trans_trigger  = bch2_trans_mark_reflink_v,            \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
                                      int, struct printbuf *);
@@ -40,11 +40,11 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
                              struct bkey_s_c, struct bkey_i *,
                              unsigned);
 
-#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) {        \
        .key_invalid    = bch2_indirect_inline_data_invalid,    \
        .val_to_text    = bch2_indirect_inline_data_to_text,    \
        .trans_trigger  = bch2_trans_mark_indirect_inline_data, \
-}
+})
 
 static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
 {
index 0535b1d3760edc1cdbc8284c44546f60f1ffb944..f12a35b3dbcf3b170dd66760e5b7944fad7fb88f 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_REPLICAS_TYPES_H
 #define _BCACHEFS_REPLICAS_TYPES_H
 
index 8c98bacca290b8301f421a16707eebf811e32126..1133783477e14424785688da513f82027615ae06 100644 (file)
@@ -158,6 +158,7 @@ static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
 
        for (i = 0; i < 2; i++) {
                int ret = snapshot_live(trans, child[i]);
+
                if (ret < 0)
                        return ret;
 
index 02a636644988a4ba51327c071b20906821cc8f93..c694c1c24483beeebb6f5534be511a21d7a3ba3e 100644 (file)
@@ -9,10 +9,10 @@ void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
                          int rw, struct printbuf *);
 
-#define bch2_bkey_ops_snapshot (struct bkey_ops) {             \
+#define bch2_bkey_ops_snapshot ((struct bkey_ops) {            \
        .key_invalid    = bch2_snapshot_invalid,                \
        .val_to_text    = bch2_snapshot_to_text,                \
-}
+})
 
 int bch2_mark_snapshot(struct btree_trans *, struct bkey_s_c,
                       struct bkey_s_c, unsigned);
@@ -109,10 +109,10 @@ int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c,
                           int rw, struct printbuf *);
 void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_subvolume (struct bkey_ops) {            \
+#define bch2_bkey_ops_subvolume ((struct bkey_ops) {           \
        .key_invalid    = bch2_subvolume_invalid,               \
        .val_to_text    = bch2_subvolume_to_text,               \
-}
+})
 
 int bch2_subvolume_get(struct btree_trans *, unsigned,
                       bool, int, struct bch_subvolume *);
index 0f45aef78477326b1f10dd1aeae4fabe385097de..06b2924c58c82ffa440ec92aacceb4310c7f4918 100644 (file)
@@ -90,9 +90,9 @@ static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
        static struct attribute sysfs_##_name =                         \
                { .name = #_name, .mode = _mode }
 
-#define write_attribute(n)     __sysfs_attribute(n, S_IWUSR)
-#define read_attribute(n)      __sysfs_attribute(n, S_IRUGO)
-#define rw_attribute(n)                __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+#define write_attribute(n)     __sysfs_attribute(n, 0200)
+#define read_attribute(n)      __sysfs_attribute(n, 0444)
+#define rw_attribute(n)                __sysfs_attribute(n, 0644)
 
 #define sysfs_printf(file, fmt, ...)                                   \
 do {                                                                   \
@@ -228,13 +228,13 @@ write_attribute(perf_test);
 
 #define x(_name)                                               \
        static struct attribute sysfs_time_stat_##_name =               \
-               { .name = #_name, .mode = S_IRUGO };
+               { .name = #_name, .mode = 0444 };
        BCH_TIME_STATS()
 #undef x
 
 static struct attribute sysfs_state_rw = {
        .name = "state",
-       .mode = S_IRUGO
+       .mode = 0444,
 };
 
 static size_t bch2_btree_cache_size(struct bch_fs *c)
@@ -613,12 +613,14 @@ struct attribute *bch2_fs_counters_files[] = {
 SHOW(bch2_fs_internal)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
        return bch2_fs_to_text(out, &c->kobj, attr);
 }
 
 STORE(bch2_fs_internal)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
        return bch2_fs_store(&c->kobj, attr, buf, size);
 }
 SYSFS_OPS(bch2_fs_internal);
index d058861811189433886cb16b482e6589604f3025..43f974eb9b7e1fdd43b01066c85ee5da30075cbc 100644 (file)
@@ -46,7 +46,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
-               bch_err(c, "update error in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -55,7 +55,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error (first) in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -64,7 +64,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error (second) in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
                goto err;
        }
 err:
@@ -92,7 +92,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
-               bch_err(c, "update error in test_delete_written: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -103,7 +103,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error in test_delete_written: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 err:
@@ -136,7 +136,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -202,7 +202,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_extents: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -269,7 +269,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_slots: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -342,7 +342,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_slots_extents: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -456,7 +456,7 @@ static int insert_test_extent(struct bch_fs *c,
        ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                NULL, NULL, 0);
        if (ret)
-               bch_err(c, "insert error in insert_test_extent: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -555,7 +555,7 @@ static int test_snapshots(struct bch_fs *c, u64 nr)
 
        ret = test_snapshot_filter(c, snapids[0], snapids[1]);
        if (ret) {
-               bch_err(c, "err from test_snapshot_filter: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
                return ret;
        }
 
@@ -567,11 +567,8 @@ static int test_snapshots(struct bch_fs *c, u64 nr)
 static u64 test_rand(void)
 {
        u64 v;
-#if 0
-       v = prandom_u32();
-#else
+
        prandom_bytes(&v, sizeof(v));
-#endif
        return v;
 }
 
@@ -592,7 +589,7 @@ static int rand_insert(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
                if (ret) {
-                       bch_err(c, "error in rand_insert: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -628,7 +625,7 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
                if (ret) {
-                       bch_err(c, "error in rand_insert_multi: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -655,7 +652,7 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
                lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
                ret = bkey_err(k);
                if (ret) {
-                       bch_err(c, "error in rand_lookup: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -678,7 +675,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
        k = bch2_btree_iter_peek(iter);
        ret = bkey_err(k);
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
+               bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
        if (ret)
                return ret;
 
@@ -708,7 +705,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        rand_mixed_trans(&trans, &iter, &cookie, i, rand));
                if (ret) {
-                       bch_err(c, "update error in rand_mixed: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -754,7 +751,7 @@ static int rand_delete(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
                if (ret) {
-                       bch_err(c, "error in rand_delete: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -786,7 +783,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
                        bch2_trans_update(&trans, &iter, &insert.k_i, 0);
                }));
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -805,7 +802,7 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
                                  SPOS(0, 0, U32_MAX), 0, k,
                0);
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -831,7 +828,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
                        bch2_trans_update(&trans, &iter, &u.k_i, 0);
                }));
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -845,7 +842,7 @@ static int seq_delete(struct bch_fs *c, u64 nr)
                                      SPOS(0, 0, U32_MAX), SPOS_MAX,
                                      0, NULL);
        if (ret)
-               bch_err(c, "error in seq_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
index 66d7a1e30350e30875d3e1597675ddf264a9c8e6..03f1b73fc926290a90fe9b8af13274c3b88c1cde 100644 (file)
@@ -9,10 +9,10 @@ extern const struct bch_hash_desc bch2_xattr_hash_desc;
 int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_xattr (struct bkey_ops) {                \
+#define bch2_bkey_ops_xattr ((struct bkey_ops) {       \
        .key_invalid    = bch2_xattr_invalid,           \
        .val_to_text    = bch2_xattr_to_text,           \
-}
+})
 
 static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
 {