Update bcachefs sources to 171da96d76 bcachefs: Drop some anonymous structs, unions
author    Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 5 Mar 2023 03:47:04 +0000 (22:47 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 5 Mar 2023 04:33:25 +0000 (23:33 -0500)
30 files changed:
.bcachefs_revision
Makefile
cmd_list.c
cmd_list_journal.c
cmd_migrate.c
include/linux/closure.h
include/linux/freezer.h
libbcachefs/bcachefs.h
libbcachefs/bcachefs_format.h
libbcachefs/bkey.h
libbcachefs/bkey_sort.c
libbcachefs/bset.c
libbcachefs/bset.h
libbcachefs/btree_io.c
libbcachefs/btree_key_cache.c
libbcachefs/btree_update_interior.c
libbcachefs/data_update.c
libbcachefs/debug.c
libbcachefs/ec.c
libbcachefs/extents.h
libbcachefs/io.c
libbcachefs/journal_io.c
libbcachefs/journal_io.h
libbcachefs/move.c
libbcachefs/move.h
libbcachefs/movinggc.c
libbcachefs/recovery.c
libbcachefs/util.h
linux/closure.c
linux/kthread.c
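
The headline change drops the anonymous struct/union from struct bkey_i, so the key length is always reached as k.u64s rather than through an aliased top-level u64s field. A minimal before/after sketch, condensed from the libbcachefs/bcachefs_format.h hunk below:

        /* Before: u64s aliased the first field of the embedded struct bkey */
        struct bkey_i {
                __u64                   _data[0];
                union {
                        struct { __u8 u64s; };
                        struct { struct bkey k; struct bch_val v; };
                };
        };

        /* After: callers must spell out insert->k.u64s explicitly */
        struct bkey_i {
                __u64                   _data[0];
                struct bkey             k;
                struct bch_val          v;
        };

Most of the mechanical churn below (btree_key_cache.c, btree_update_interior.c, extents.h, io.c, recovery.c) is the resulting s/->u64s/->k.u64s/ fallout.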

index 033b6af2538f56e2f720b4c30beefaca69efe1f4..48ce6994bc10cf8905141a42cd0bcb8335580859 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-c28937622fbd373f152df01f29efa2d79af99633
+171da96d76d03a12872c8c9e2d02602c3ddfcb5f
index 8846e1276adac414fc24860e5f0c46cd076e5b70..443322801fbf679aafc82c769b3c4203d7bf8ec0 100644
--- a/Makefile
+++ b/Makefile
@@ -190,6 +190,10 @@ update-bcachefs-sources:
        git add libbcachefs/*.[ch]
        cp $(LINUX_DIR)/include/trace/events/bcachefs.h include/trace/events/
        git add include/trace/events/bcachefs.h
+       cp $(LINUX_DIR)/include/linux/closure.h include/linux/
+       git add include/linux/closure.h
+       cp $(LINUX_DIR)/lib/closure.c linux/
+       git add linux/closure.c
        cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/
        git add include/linux/xxhash.h
        cp $(LINUX_DIR)/lib/xxhash.c linux/
index 1b07df3019733dd2ee5b0c83ba6b3a731eb9b049..dc95f7a0e2b0af18b34507b0ef49d489908ebce2 100644
--- a/cmd_list.c
+++ b/cmd_list.c
@@ -190,7 +190,7 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
                        le64_to_cpu(i->journal_seq));
                offset += sectors;
 
-               for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) {
+               for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
                        struct bkey u;
                        struct printbuf buf = PRINTBUF;
 
index 4c77bf3fe0c0b105473d54a26408e9b40ab3c4a6..171242253ce8cc964b9344979f9c6234c87c552a 100644
--- a/cmd_list_journal.c
+++ b/cmd_list_journal.c
@@ -81,7 +81,7 @@ static bool entry_matches_transaction_filter(struct jset_entry *entry,
            entry->type == BCH_JSET_ENTRY_overwrite) {
                struct bkey_i *k;
 
-               vstruct_for_each(entry, k)
+               jset_entry_for_each_key(entry, k)
                        if (bkey_matches_filter(filter, entry, k))
                                return true;
        }
@@ -117,7 +117,7 @@ static bool should_print_entry(struct jset_entry *entry, d_btree_id filter)
            entry->type != BCH_JSET_ENTRY_overwrite)
                return true;
 
-       vstruct_for_each(entry, k)
+       jset_entry_for_each_key(entry, k)
                darray_for_each(filter, id)
                        if (entry->btree_id == *id)
                                return true;
index 5a35c5a178aba004ab4a01c40953ea6113cbecee..8cf7c9840e461997c253250485b414e70712c8d5 100644
--- a/cmd_migrate.c
+++ b/cmd_migrate.c
@@ -311,7 +311,7 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst,
 
        while (length) {
                struct bkey_i_extent *e;
-               __BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX) k;
+               BKEY_PADDED_ONSTACK(k, BKEY_EXTENT_VAL_U64s_MAX) k;
                u64 b = sector_to_bucket(ca, physical);
                struct disk_reservation res;
                unsigned sectors;
index d85ca8696b746c94ccc21b0154e994ef7b932658..722a586bb22444418d31eb80d9d25e89de5d72a2 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -3,7 +3,6 @@
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
-#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
@@ -173,6 +172,11 @@ void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void __closure_sync(struct closure *cl);
 
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+       return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
 /**
  * closure_sync - sleep until a closure has nothing left to wait on
  *
@@ -181,7 +185,7 @@ void __closure_sync(struct closure *cl);
  */
 static inline void closure_sync(struct closure *cl)
 {
-       if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+       if (closure_nr_remaining(cl) != 1)
                __closure_sync(cl);
 }
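
closure_nr_remaining() factors out the masked read of cl->remaining that closure_sync() was doing inline; the reworked copygc below also uses it to check whether a moving_context still has I/O in flight. A hedged usage sketch (assumes a live struct closure):

        /* true once only the initial ref remains, i.e. nothing outstanding */
        static inline bool closure_idle(struct closure *cl)
        {
                return closure_nr_remaining(cl) == 1;
        }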
 
index cf485d78ed59eb05aa48a2e42c5fb1c04cb3418a..d90373f39811fdc79ad23bdd6c3f7239acf66616 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -7,4 +7,6 @@
 #define freezable_schedule()   schedule()
 #define freezable_schedule_timeout(_t) schedule_timeout(_t)
 
+static inline void __refrigerator(bool f) {}
+
 #endif /* __TOOLS_LINUX_FREEZER_H */
index 3f88e7eac17c13e7e9b41e55c458763b948fa336..25a32fd6c8f2a594b7fca3cd8dc0eaba4d003a8f 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -1123,4 +1123,7 @@ static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
        return dev < c->sb.nr_devices && c->devs[dev];
 }
 
+#define BKEY_PADDED_ONSTACK(key, pad)                          \
+       struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+
 #endif /* _BCACHEFS_H */
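
BKEY_PADDED_ONSTACK is the struct-wrapping variant needed for stack declarations now that __BKEY_PADDED expands to bare members (see the bcachefs_format.h hunk below). Usage sketch, as in the btree_io.c and cmd_migrate.c hunks:

        /* declares tmp.k plus pad space for a btree-pointer value */
        BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

        bkey_copy(&tmp.k, &wbio->key);
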
index 750a6923ef4324032015f3081764ed25c64998d2..8b29e462492b61888972bac34c9877ad77f99c43 100644
--- a/libbcachefs/bcachefs_format.h
+++ b/libbcachefs/bcachefs_format.h
@@ -290,16 +290,8 @@ enum bch_bkey_fields {
 struct bkey_i {
        __u64                   _data[0];
 
-       union {
-       struct {
-               /* Size of combined key and value, in u64s */
-               __u8            u64s;
-       };
-       struct {
-               struct bkey     k;
-               struct bch_val  v;
-       };
-       };
+       struct bkey     k;
+       struct bch_val  v;
 };
 
 #define KEY(_inode, _offset, _size)                                    \
@@ -318,7 +310,7 @@ static inline void bkey_init(struct bkey *k)
 #define bkey_bytes(_k)         ((_k)->u64s * sizeof(__u64))
 
 #define __BKEY_PADDED(key, pad)                                        \
-       struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+       struct bkey_i key; __u64 key ## _pad[pad]
 
 /*
  * - DELETED keys are used internally to mark keys that should be ignored but
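
With the wrapper struct gone, __BKEY_PADDED can only appear embedded inside another struct, where its two members land directly in the container; standalone declarations switch to BKEY_PADDED_ONSTACK. A sketch of the two forms (example_buf is a hypothetical container, not from this commit):

        /* embedded: expands to `struct bkey_i key; __u64 key_pad[...]` */
        struct example_buf {
                __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX);
        };

        /* on-stack: needs the wrapping variant from bcachefs.h */
        BKEY_PADDED_ONSTACK(key, BKEY_EXTENT_VAL_U64s_MAX) buf;
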
index 7698e7ec72479258d83d7c640ea359c4f34f91e6..983572efd0814688d6fb1f6a184c1d247d70739d 100644
--- a/libbcachefs/bkey.h
+++ b/libbcachefs/bkey.h
@@ -34,7 +34,12 @@ struct bkey_s {
        };
 };
 
-#define bkey_next(_k)          vstruct_next(_k)
+#define bkey_p_next(_k)                vstruct_next(_k)
+
+static inline struct bkey_i *bkey_next(struct bkey_i *k)
+{
+       return (struct bkey_i *) (k->_data + k->k.u64s);
+}
 
 #define bkey_val_u64s(_k)      ((_k)->u64s - BKEY_U64s)
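
This split gives each key layout its own iterator: packed keys inside a bset keep the old vstruct_next() behaviour under the new name bkey_p_next(), while the typed bkey_next() advances over unpacked struct bkey_i keys laid end to end, as in a journal entry. A sketch of the two, assuming a bset `i` and suitably initialized cursors:

        struct bkey_packed *p;
        struct bkey_i *k;

        /* packed keys in a bset: */
        for (p = i->start; p != vstruct_last(i); p = bkey_p_next(p))
                ;       /* process p */

        /* unpacked keys back to back: */
        k = bkey_next(k);       /* == (struct bkey_i *) (k->_data + k->k.u64s) */

The bkey_sort.c, bset.c, and btree_io.c hunks below are the mechanical rename to bkey_p_next().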
 
index 557a79cad98670b5f753f6e7d71372b4418cf7a8..cdef41db76925c235e218974e0dba760da61e55c 100644
--- a/libbcachefs/bkey_sort.c
+++ b/libbcachefs/bkey_sort.c
@@ -46,7 +46,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
 
        BUG_ON(!iter->used);
 
-       i->k = bkey_next(i->k);
+       i->k = bkey_p_next(i->k);
 
        BUG_ON(i->k > i->end);
 
@@ -108,7 +108,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
                    !should_drop_next_key(iter)) {
                        bkey_copy(out, k);
                        btree_keys_account_key_add(&nr, 0, out);
-                       out = bkey_next(out);
+                       out = bkey_p_next(out);
                }
 
                sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
@@ -147,7 +147,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
                out->needs_whiteout = false;
 
                btree_keys_account_key_add(&nr, 0, out);
-               out = bkey_next(out);
+               out = bkey_p_next(out);
        }
 
        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -194,7 +194,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
                        bkey_copy(out, in);
                }
                out->needs_whiteout |= needs_whiteout;
-               out = bkey_next(out);
+               out = bkey_p_next(out);
        }
 
        return (u64 *) out - (u64 *) dst;
index 89478fc57411c95effcd339bf3049d9000123358..3bd50f12f5a4ce2306fcc666cddc6f27abc486ee 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -69,7 +69,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
        for (_k = i->start;
             _k < vstruct_last(i);
             _k = _n) {
-               _n = bkey_next(_k);
+               _n = bkey_p_next(_k);
 
                k = bkey_disassemble(b, _k, &uk);
 
@@ -542,7 +542,7 @@ start:
                               rw_aux_tree(b, t)[j - 1].offset);
                }
 
-               k = bkey_next(k);
+               k = bkey_p_next(k);
                BUG_ON(k >= btree_bkey_last(b, t));
        }
 }
@@ -733,7 +733,7 @@ retry:
        /* First we figure out where the first key in each cacheline is */
        eytzinger1_for_each(j, t->size - 1) {
                while (bkey_to_cacheline(b, t, k) < cacheline)
-                       prev = k, k = bkey_next(k);
+                       prev = k, k = bkey_p_next(k);
 
                if (k >= btree_bkey_last(b, t)) {
                        /* XXX: this path sucks */
@@ -750,7 +750,7 @@ retry:
        }
 
        while (k != btree_bkey_last(b, t))
-               prev = k, k = bkey_next(k);
+               prev = k, k = bkey_p_next(k);
 
        if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
                bkey_init(&min_key.k);
@@ -888,7 +888,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
        struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
        while ((p = __bkey_prev(b, t, k)) && !ret) {
-               for (i = p; i != k; i = bkey_next(i))
+               for (i = p; i != k; i = bkey_p_next(i))
                        if (i->type >= min_key_type)
                                ret = i;
 
@@ -899,10 +899,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
                BUG_ON(ret >= orig_k);
 
                for (i = ret
-                       ? bkey_next(ret)
+                       ? bkey_p_next(ret)
                        : btree_bkey_first(b, t);
                     i != orig_k;
-                    i = bkey_next(i))
+                    i = bkey_p_next(i))
                        BUG_ON(i->type >= min_key_type);
        }
 
@@ -974,7 +974,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
                struct bkey_packed *k = start;
 
                while (1) {
-                       k = bkey_next(k);
+                       k = bkey_p_next(k);
                        if (k == end)
                                break;
 
@@ -1208,12 +1208,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
                while (m != btree_bkey_last(b, t) &&
                       bkey_iter_cmp_p_or_unp(b, m,
                                        lossy_packed_search, search) < 0)
-                       m = bkey_next(m);
+                       m = bkey_p_next(m);
 
        if (!packed_search)
                while (m != btree_bkey_last(b, t) &&
                       bkey_iter_pos_cmp(b, m, search) < 0)
-                       m = bkey_next(m);
+                       m = bkey_p_next(m);
 
        if (bch2_expensive_debug_checks) {
                struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
index fd2915a150708f9b9e5203d761c2ea2342e872ef..2105e7836557c854c12070f309f5116b8562eaf7 100644
--- a/libbcachefs/bset.h
+++ b/libbcachefs/bset.h
@@ -211,7 +211,7 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
 #define bset_tree_for_each_key(_b, _t, _k)                             \
        for (_k = btree_bkey_first(_b, _t);                             \
             _k != btree_bkey_last(_b, _t);                             \
-            _k = bkey_next(_k))
+            _k = bkey_p_next(_k))
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {
index 999ea2e9d086cc8e008c0771f60b456e312e361f..29163b46ba5980033fdbc9da5a74db9019c5c063 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -77,9 +77,9 @@ static void verify_no_dups(struct btree *b,
        if (start == end)
                return;
 
-       for (p = start, k = bkey_next(start);
+       for (p = start, k = bkey_p_next(start);
             k != end;
-            p = k, k = bkey_next(k)) {
+            p = k, k = bkey_p_next(k)) {
                struct bkey l = bkey_unpack_key(b, p);
                struct bkey r = bkey_unpack_key(b, k);
 
@@ -92,7 +92,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
        struct bkey_packed *k;
 
-       for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+       for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                k->needs_whiteout = v;
 }
 
@@ -175,7 +175,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
        for (k = unwritten_whiteouts_start(c, b);
             k != unwritten_whiteouts_end(c, b);
-            k = bkey_next(k))
+            k = bkey_p_next(k))
                *--ptrs = k;
 
        sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
        while (ptrs != ptrs_end) {
                bkey_copy(k, *ptrs);
-               k = bkey_next(k);
+               k = bkey_p_next(k);
                ptrs++;
        }
 
@@ -256,11 +256,11 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
                out = i->start;
 
                for (k = start; k != end; k = n) {
-                       n = bkey_next(k);
+                       n = bkey_p_next(k);
 
                        if (!bkey_deleted(k)) {
                                bkey_copy(out, k);
-                               out = bkey_next(out);
+                               out = bkey_p_next(out);
                        } else {
                                BUG_ON(k->needs_whiteout);
                        }
@@ -652,7 +652,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
                struct bset *i = bset(b, t);
                struct bkey_packed *k;
 
-               for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+               for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
                                break;
 
@@ -665,7 +665,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
                        set_btree_bset_end(b, t);
                }
 
-               for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+               for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
                                break;
 
@@ -843,7 +843,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                struct bkey_s u;
                struct bkey tmp;
 
-               if (btree_err_on(bkey_next(k) > vstruct_last(i),
+               if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
                                 BTREE_ERR_FIXABLE, c, NULL, b, i,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -854,7 +854,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                                 BTREE_ERR_FIXABLE, c, NULL, b, i,
                                 "invalid bkey format %u", k->format)) {
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-                       memmove_u64s_down(k, bkey_next(k),
+                       memmove_u64s_down(k, bkey_p_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        continue;
                }
@@ -878,7 +878,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                        btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-                       memmove_u64s_down(k, bkey_next(k),
+                       memmove_u64s_down(k, bkey_p_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        continue;
                }
@@ -901,14 +901,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
                        if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
                                i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-                               memmove_u64s_down(k, bkey_next(k),
+                               memmove_u64s_down(k, bkey_p_next(k),
                                                  (u64 *) vstruct_end(i) - (u64 *) k);
                                continue;
                        }
                }
 
                prev = k;
-               k = bkey_next(k);
+               k = bkey_p_next(k);
        }
 fsck_err:
        printbuf_exit(&buf);
@@ -1139,7 +1139,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                        btree_keys_account_key_drop(&b->nr, 0, k);
 
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-                       memmove_u64s_down(k, bkey_next(k),
+                       memmove_u64s_down(k, bkey_p_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        set_btree_bset_end(b, b->set);
                        continue;
@@ -1151,7 +1151,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                        bp.v->mem_ptr = 0;
                }
 
-               k = bkey_next(k);
+               k = bkey_p_next(k);
        }
 
        bch2_bset_build_aux_tree(b, b->set, false);
@@ -1847,7 +1847,7 @@ static void btree_write_submit(struct work_struct *work)
 {
        struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
        struct bch_extent_ptr *ptr;
-       __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+       BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 
        bkey_copy(&tmp.k, &wbio->key);
 
index 441b04c3232ce4a75373a50a10eaa995cf7af4ad..298a674dbfd6e9963a624a410fe7d5a814954a80 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -777,14 +777,14 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
        struct bkey_cached *ck = (void *) path->l[0].b;
        bool kick_reclaim = false;
 
-       BUG_ON(insert->u64s > ck->u64s);
+       BUG_ON(insert->k.u64s > ck->u64s);
 
        if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
                int difference;
 
-               BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);
+               BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);
 
-               difference = jset_u64s(insert->u64s) - ck->res.u64s;
+               difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
                if (difference > 0) {
                        trans->journal_preres.u64s      -= difference;
                        ck->res.u64s                    += difference;
index 463d05c1cb2bacf9c2e3afd6a1993bed01d27ae7..92a49971188cbe100e87e4bc0ddb9efcff8e875b 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -242,7 +242,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        struct write_point *wp;
        struct btree *b;
-       __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+       BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
        struct open_buckets ob = { .nr = 0 };
        struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
        unsigned nr_reserve;
@@ -1412,7 +1412,7 @@ static void __btree_split_node(struct btree_update *as,
                out[i]->needs_whiteout = false;
 
                btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
-               out[i] = bkey_next(out[i]);
+               out[i] = bkey_p_next(out[i]);
        }
 
        for (i = 0; i < 2; i++) {
@@ -2444,7 +2444,7 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c,
                                          BCH_JSET_ENTRY_btree_root,
                                          i, c->btree_roots[i].level,
                                          &c->btree_roots[i].key,
-                                         c->btree_roots[i].key.u64s);
+                                         c->btree_roots[i].key.k.u64s);
                        end = vstruct_next(end);
                }
 
index ca469473c72cde0825fd5665e495e05dcbdf29eb..eb91e24c3157c8bb8c25aa98f699afb9849f43c8 100644
--- a/libbcachefs/data_update.c
+++ b/libbcachefs/data_update.c
@@ -274,7 +274,7 @@ next:
                }
                continue;
 nomatch:
-               if (m->ctxt) {
+               if (m->ctxt && m->ctxt->stats) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
                        atomic64_inc(&m->ctxt->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
index 8f43581f3972d283f847723459981ca7068d58fd..0035fe875a478d8b14a1c6ea0472083f22c5725a 100644
--- a/libbcachefs/debug.c
+++ b/libbcachefs/debug.c
@@ -153,7 +153,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 
        BUG_ON(b->nsets != 1);
 
-       for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
+       for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
                if (k->type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
                        v->mem_ptr = 0;
index f759888341c9226bab02d84de368db4f8ba5c77d..c0342e6094a92bc04fd1f43f40056ccf9bd9cb75 100644
--- a/libbcachefs/ec.c
+++ b/libbcachefs/ec.c
@@ -869,12 +869,10 @@ static int ec_stripe_key_update(struct btree_trans *trans,
                for (i = 0; i < new->v.nr_blocks; i++) {
                        unsigned v = stripe_blockcount_get(old, i);
 
-                       if (!v)
-                               continue;
-
-                       BUG_ON(old->ptrs[i].dev != new->v.ptrs[i].dev ||
-                              old->ptrs[i].gen != new->v.ptrs[i].gen ||
-                              old->ptrs[i].offset != new->v.ptrs[i].offset);
+                       BUG_ON(v &&
+                              (old->ptrs[i].dev != new->v.ptrs[i].dev ||
+                               old->ptrs[i].gen != new->v.ptrs[i].gen ||
+                               old->ptrs[i].offset != new->v.ptrs[i].offset));
 
                        stripe_blockcount_set(&new->v, i, v);
                }
@@ -1594,8 +1592,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
        bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i);
        h->s->have_existing_stripe = true;
 
-       pr_info("reused %llu", h->s->idx);
-
        return 0;
 }
 
@@ -1687,9 +1683,9 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
        if (h->s->allocated)
                goto allocated;
 
-       if (h->s->idx)
+       if (h->s->have_existing_stripe)
                goto alloc_existing;
-#if 0
+
        /* First, try to allocate a full stripe: */
        ret =   new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
                __bch2_ec_stripe_head_reserve(trans, h);
@@ -1699,24 +1695,17 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
            bch2_err_matches(ret, ENOMEM))
                goto err;
 
-       if (ret == -BCH_ERR_open_buckets_empty) {
-               /* don't want to reuse in this case */
-       }
-#endif
        /*
         * Not enough buckets available for a full stripe: we must reuse an
         * existing stripe:
         */
        while (1) {
                ret = __bch2_ec_stripe_head_reuse(trans, h);
-               if (ret)
-                       ret = __bch2_ec_stripe_head_reserve(trans, h);
                if (!ret)
                        break;
-               pr_info("err %s", bch2_err_str(ret));
                if (ret == -BCH_ERR_ENOSPC_stripe_reuse && cl)
                        ret = -BCH_ERR_stripe_alloc_blocked;
-               if (waiting || !cl)
+               if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
                        goto err;
 
                /* XXX freelist_wait? */
index 2e37543a62291a8335106abefed72da047e57753..bac6a1ed2c599a945e915c23b0772c2221887012 100644
--- a/libbcachefs/extents.h
+++ b/libbcachefs/extents.h
@@ -633,7 +633,7 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
                memcpy((void *) &k->v + bkey_val_bytes(&k->k),
                       &ptr,
                       sizeof(ptr));
-               k->u64s++;
+               k->k.u64s++;
                break;
        default:
                BUG();
index 7ec36113b9d62f166e860bf984c6e9ddc05c6a02..de30daca4277b5883a6ef40c31c8682657bbfc7d 100644
--- a/libbcachefs/io.c
+++ b/libbcachefs/io.c
@@ -734,7 +734,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
                }
 
                if (dst != src)
-                       memmove_u64s_down(dst, src, src->u64s);
+                       memmove_u64s_down(dst, src, src->k.u64s);
                dst = bkey_next(dst);
        }
 
index 641ca36f384f04cd7ee033d64916407600f78259..8d3878bde1d1039f610b26fd84d8b39221f130ef 100644
--- a/libbcachefs/journal_io.c
+++ b/libbcachefs/journal_io.c
@@ -357,7 +357,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
        struct bkey_i *k;
        bool first = true;
 
-       vstruct_for_each(entry, k) {
+       jset_entry_for_each_key(entry, k) {
                if (!first) {
                        prt_newline(out);
                        prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
index a32c2876f2a65075e5803ce01c18aa9f29793aec..8801e98104bd8aaa9671b41f89ee4e904169388d 100644
--- a/libbcachefs/journal_io.h
+++ b/libbcachefs/journal_io.h
@@ -40,9 +40,14 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
             (entry = __jset_entry_type_next(jset, entry, type));       \
             entry = vstruct_next(entry))
 
-#define for_each_jset_key(k, _n, entry, jset)                          \
-       for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)        \
-               vstruct_for_each_safe(entry, k, _n)
+#define jset_entry_for_each_key(_e, _k)                                        \
+       for (_k = (_e)->start;                                          \
+            _k < vstruct_last(_e);                                     \
+            _k = bkey_next(_k))
+
+#define for_each_jset_key(k, entry, jset)                              \
+       for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
+               jset_entry_for_each_key(entry, k)
 
 int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
                                struct jset_entry *, unsigned, int, int);
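
for_each_jset_key() loses its extra `_n` safe-iteration cursor and is rebuilt on jset_entry_for_each_key(), which walks one entry's unpacked keys with the typed bkey_next(). Callers simply drop the spare variable; a usage sketch matching the recovery.c hunk below:

        struct jset_entry *entry;
        struct bkey_i *k;

        /* old: for_each_jset_key(k, _n, entry, &i->j) */
        for_each_jset_key(k, entry, &i->j)
                nr_keys++;
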
index 4ef7595fa6a2d5f43c098b40d10a5ca73ae029cf..11ea109fefec5cf23b55b3655b5a3cea2417828b 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -304,12 +304,6 @@ static int bch2_move_extent(struct btree_trans *trans,
        if (ret && ret != -BCH_ERR_unwritten_extent_update)
                goto err_free_pages;
 
-       io->write.ctxt = ctxt;
-       io->write.op.end_io = move_write_done;
-
-       atomic64_inc(&ctxt->stats->keys_moved);
-       atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
        if (ret == -BCH_ERR_unwritten_extent_update) {
                bch2_update_unwritten_extent(trans, &io->write);
                move_free(io);
@@ -318,6 +312,14 @@ static int bch2_move_extent(struct btree_trans *trans,
 
        BUG_ON(ret);
 
+       io->write.ctxt = ctxt;
+       io->write.op.end_io = move_write_done;
+
+       if (ctxt->stats) {
+               atomic64_inc(&ctxt->stats->keys_moved);
+               atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+       }
+
        this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
        this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
        trace_move_extent_read(k.k);
@@ -469,9 +471,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
        bch2_bkey_buf_init(&sk);
        bch2_trans_init(&trans, c, 0, 0);
 
-       ctxt->stats->data_type  = BCH_DATA_user;
-       ctxt->stats->btree_id   = btree_id;
-       ctxt->stats->pos        = start;
+       if (ctxt->stats) {
+               ctxt->stats->data_type  = BCH_DATA_user;
+               ctxt->stats->btree_id   = btree_id;
+               ctxt->stats->pos        = start;
+       }
 
        bch2_trans_iter_init(&trans, &iter, btree_id, start,
                             BTREE_ITER_PREFETCH|
@@ -496,7 +500,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
                if (bkey_ge(bkey_start_pos(k.k), end))
                        break;
 
-               ctxt->stats->pos = iter.pos;
+               if (ctxt->stats)
+                       ctxt->stats->pos = iter.pos;
 
                if (!bkey_extent_is_direct_data(k.k))
                        goto next_nondata;
@@ -536,7 +541,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
                if (ctxt->rate)
                        bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
-               atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+               if (ctxt->stats)
+                       atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
                bch2_btree_iter_advance(&iter);
        }
@@ -585,7 +591,7 @@ int bch2_move_data(struct bch_fs *c,
        return ret;
 }
 
-static noinline void verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
@@ -620,6 +626,9 @@ again:
 failed_to_evacuate:
        bch2_trans_iter_exit(trans, &iter);
 
+       if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+               return;
+
        prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
        bch2_bkey_val_to_text(&buf, c, k);
 
@@ -760,7 +769,8 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 
                        if (ctxt->rate)
                                bch2_ratelimit_increment(ctxt->rate, k.k->size);
-                       atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+                       if (ctxt->stats)
+                               atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
                } else {
                        struct btree *b;
 
@@ -787,8 +797,10 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
                        if (ctxt->rate)
                                bch2_ratelimit_increment(ctxt->rate,
                                                         c->opts.btree_node_size >> 9);
-                       atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-                       atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+                       if (ctxt->stats) {
+                               atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+                               atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+                       }
                }
 next:
                bp_offset++;
@@ -801,7 +813,7 @@ next:
                move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
                closure_sync(&ctxt->cl);
                if (!ctxt->write_error)
-                       verify_bucket_evacuated(trans, bucket, gen);
+                       bch2_verify_bucket_evacuated(trans, bucket, gen);
        }
 err:
        bch2_bkey_buf_exit(&sk, c);
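
All these NULL checks make bch_move_stats optional for a moving_context, which the copygc rework below depends on: each in-flight bucket gets its own context with no stats attached. An init sketch mirroring the movinggc.c hunk:

        struct moving_context ctxt;

        /* stats == NULL: move.c now skips the counters instead of oopsing */
        bch2_moving_ctxt_init(&ctxt, c, NULL, NULL,
                              writepoint_ptr(&c->copygc_write_point),
                              false);
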
index c5a7c0add1d695939c6b0b52123c5356d9a07228..3b283af3bdb642339df220eb229cbeeed5de4f5e 100644
--- a/libbcachefs/move.h
+++ b/libbcachefs/move.h
@@ -30,6 +30,8 @@ struct moving_context {
        wait_queue_head_t       wait;
 };
 
+void bch2_verify_bucket_evacuated(struct btree_trans *, struct bpos, int);
+
 #define move_ctxt_wait_event(_ctxt, _trans, _cond)                     \
 do {                                                                   \
        bool cond_finished = false;                                     \
index 96a9512c705ea98932abdaf298be461b2c4fbc0d..80f922767da94d20998be44f6b54b1e6a4c9e9b7 100644
--- a/libbcachefs/movinggc.c
+++ b/libbcachefs/movinggc.c
@@ -26,6 +26,7 @@
 #include "super-io.h"
 
 #include <trace/events/bcachefs.h>
+#include <linux/bsearch.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
@@ -71,62 +72,147 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
        return ret;
 }
 
-static int bch2_copygc_next_bucket(struct btree_trans *trans,
-                                  struct bpos *bucket, u8 *gen, struct bpos *pos)
+struct copygc_bucket_in_flight {
+       struct bpos             bucket;
+       u8                      gen;
+       struct moving_context   ctxt;
+};
+
+typedef FIFO(struct copygc_bucket_in_flight) copygc_buckets_in_flight;
+
+struct copygc_bucket {
+       struct bpos             bucket;
+       u8                      gen;
+};
+
+typedef DARRAY(struct copygc_bucket) copygc_buckets;
+
+static int copygc_bucket_cmp(const void *_l, const void *_r)
+{
+       const struct copygc_bucket *l = _l;
+       const struct copygc_bucket *r = _r;
+
+       return bpos_cmp(l->bucket, r->bucket) ?: cmp_int(l->gen, r->gen);
+}
+
+static bool bucket_in_flight(copygc_buckets *buckets_sorted, struct copygc_bucket b)
+{
+       return bsearch(&b,
+                      buckets_sorted->data,
+                      buckets_sorted->nr,
+                      sizeof(buckets_sorted->data[0]),
+                      copygc_bucket_cmp) != NULL;
+}
+
+static void copygc_buckets_wait(struct btree_trans *trans,
+                               copygc_buckets_in_flight *buckets_in_flight,
+                               size_t nr, bool verify_evacuated)
+{
+       while (!fifo_empty(buckets_in_flight)) {
+               struct copygc_bucket_in_flight *i = &fifo_peek_front(buckets_in_flight);
+
+               if (fifo_used(buckets_in_flight) <= nr &&
+                   closure_nr_remaining(&i->ctxt.cl) != 1)
+                       break;
+
+               /*
+                * moving_ctxt_exit calls bch2_write as it flushes pending
+                * reads, which inits another btree_trans; this one must be
+                * unlocked:
+                */
+               bch2_trans_unlock(trans);
+               bch2_moving_ctxt_exit(&i->ctxt);
+               if (verify_evacuated)
+                       bch2_verify_bucket_evacuated(trans, i->bucket, i->gen);
+               buckets_in_flight->front++;
+       }
+}
+
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+                       copygc_buckets_in_flight *buckets_in_flight,
+                       copygc_buckets *buckets)
 {
        struct btree_iter iter;
+       copygc_buckets buckets_sorted = { 0 };
+       struct copygc_bucket_in_flight *i;
        struct bkey_s_c k;
+       size_t fifo_iter;
        int ret;
 
+       copygc_buckets_wait(trans, buckets_in_flight, buckets_in_flight->size / 2, true);
+
+       fifo_for_each_entry_ptr(i, buckets_in_flight, fifo_iter) {
+               ret = darray_push(&buckets_sorted, ((struct copygc_bucket) {i->bucket, i->gen}));
+               if (ret) {
+                       bch_err(trans->c, "error allocating copygc_buckets_sorted");
+                       goto err;
+               }
+       }
+
+       sort(buckets_sorted.data,
+            buckets_sorted.nr,
+            sizeof(buckets_sorted.data[0]),
+            copygc_bucket_cmp,
+            NULL);
+
        ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
-                                 bpos_max(*pos, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0)),
+                                 lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
                                  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
                                  0, k, ({
-               *bucket = u64_to_bucket(k.k->p.offset);
+               struct copygc_bucket b = { .bucket = u64_to_bucket(k.k->p.offset) };
+               int ret = 0;
+
+               if (!bucket_in_flight(&buckets_sorted, b) &&
+                   bch2_bucket_is_movable(trans, b.bucket, lru_pos_time(k.k->p), &b.gen))
+                       ret = darray_push(buckets, b) ?: buckets->nr >= fifo_free(buckets_in_flight);
 
-               bch2_bucket_is_movable(trans, *bucket, lru_pos_time(k.k->p), gen);
+               ret;
        }));
+err:
+       darray_exit(&buckets_sorted);
 
-       *pos = iter.pos;
-       if (ret < 0)
-               return ret;
-       return ret ? 0 : -ENOENT;
+       return ret < 0 ? ret : 0;
 }
 
-static int bch2_copygc(struct bch_fs *c)
+static int bch2_copygc(struct btree_trans *trans,
+                      copygc_buckets_in_flight *buckets_in_flight,
+                      struct bch_move_stats *stats)
 {
-       struct bch_move_stats move_stats;
-       struct btree_trans trans;
-       struct moving_context ctxt;
+       struct bch_fs *c = trans->c;
        struct data_update_opts data_opts = {
                .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
        };
-       struct bpos bucket;
-       struct bpos pos;
-       u8 gen = 0;
-       unsigned nr_evacuated;
+       copygc_buckets buckets = { 0 };
+       struct copygc_bucket_in_flight *f;
+       struct copygc_bucket *i;
        int ret = 0;
 
-       bch2_move_stats_init(&move_stats, "copygc");
-       bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
-                             writepoint_ptr(&c->copygc_write_point),
-                             false);
-       bch2_trans_init(&trans, c, 0, 0);
+       ret = bch2_btree_write_buffer_flush(trans);
+       if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+                                __func__, bch2_err_str(ret)))
+               return ret;
 
-       ret = bch2_btree_write_buffer_flush(&trans);
-       BUG_ON(ret);
+       ret = bch2_copygc_get_buckets(trans, buckets_in_flight, &buckets);
+       if (ret)
+               goto err;
 
-       for (nr_evacuated = 0, pos = POS_MIN;
-            nr_evacuated < 32 && !ret;
-            nr_evacuated++, pos = bpos_nosnap_successor(pos)) {
-               ret = bch2_copygc_next_bucket(&trans, &bucket, &gen, &pos) ?:
-                       __bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
-               if (bkey_eq(pos, POS_MAX))
+       darray_for_each(buckets, i) {
+               if (unlikely(freezing(current)))
                        break;
-       }
 
-       bch2_trans_exit(&trans);
-       bch2_moving_ctxt_exit(&ctxt);
+               f = fifo_push_ref(buckets_in_flight);
+               f->bucket       = i->bucket;
+               f->gen          = i->gen;
+               bch2_moving_ctxt_init(&f->ctxt, c, NULL, NULL, //stats,
+                                     writepoint_ptr(&c->copygc_write_point),
+                                     false);
+
+               ret = __bch2_evacuate_bucket(trans, &f->ctxt, f->bucket, f->gen, data_opts);
+               if (ret)
+                       goto err;
+       }
+err:
+       darray_exit(&buckets);
 
        /* no entries in LRU btree found, or got to end: */
        if (ret == -ENOENT)
@@ -135,7 +221,7 @@ static int bch2_copygc(struct bch_fs *c)
        if (ret < 0 && !bch2_err_matches(ret, EROFS))
                bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
 
-       trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+       trace_and_count(c, copygc, c, atomic64_read(&stats->sectors_moved), 0, 0, 0);
        return ret;
 }
 
@@ -162,7 +248,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
        for_each_rw_member(ca, c, dev_idx) {
                struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-               fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+               fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
                                       ca->mi.bucket_size) >> 1);
                fragmented = usage.d[BCH_DATA_user].fragmented;
 
@@ -187,17 +273,36 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
 static int bch2_copygc_thread(void *arg)
 {
        struct bch_fs *c = arg;
+       struct btree_trans trans;
+       struct bch_move_stats move_stats;
        struct io_clock *clock = &c->io_clock[WRITE];
+       copygc_buckets_in_flight copygc_buckets;
        u64 last, wait;
        int ret = 0;
 
+       if (!init_fifo(&copygc_buckets, 1 << 14, GFP_KERNEL)) {
+               bch_err(c, "error allocating copygc buckets in flight");
+               return -ENOMEM;
+       }
+
        set_freezable();
+       bch2_move_stats_init(&move_stats, "copygc");
+       bch2_trans_init(&trans, c, 0, 0);
 
        while (!ret && !kthread_should_stop()) {
+               bch2_trans_unlock(&trans);
+
+               try_to_freeze();
                cond_resched();
 
-               if (kthread_wait_freezable(c->copy_gc_enabled))
-                       break;
+               kthread_wait(freezing(current) || c->copy_gc_enabled);
+
+               if (unlikely(freezing(current))) {
+                       copygc_buckets_wait(&trans, &copygc_buckets, 0, true);
+                       bch2_trans_unlock(&trans);
+                       __refrigerator(false);
+                       continue;
+               }
 
                last = atomic64_read(&clock->now);
                wait = bch2_copygc_wait_amount(c);
@@ -213,12 +318,16 @@ static int bch2_copygc_thread(void *arg)
                c->copygc_wait = 0;
 
                c->copygc_running = true;
-               ret = bch2_copygc(c);
+               ret = bch2_copygc(&trans, &copygc_buckets, &move_stats);
                c->copygc_running = false;
 
                wake_up(&c->copygc_running_wq);
        }
 
+       copygc_buckets_wait(&trans, &copygc_buckets, 0, !ret);
+       free_fifo(&copygc_buckets);
+       bch2_trans_exit(&trans);
+
        return 0;
 }
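
The in-flight filter above is a plain sorted-array bsearch: the FIFO is snapshotted into a darray, sorted with copygc_bucket_cmp(), and each LRU candidate is skipped if found. A self-contained illustration of that pattern with stand-in types (not the in-tree struct bpos helpers):

        #include <stdlib.h>

        struct bucket_id { unsigned long long bucket; unsigned char gen; };

        static int bucket_id_cmp(const void *_l, const void *_r)
        {
                const struct bucket_id *l = _l, *r = _r;

                return l->bucket != r->bucket
                        ? (l->bucket > r->bucket) - (l->bucket < r->bucket)
                        : (l->gen > r->gen) - (l->gen < r->gen);
        }

        static int in_flight(const struct bucket_id *sorted, size_t nr,
                             struct bucket_id b)
        {
                /* sorted[] must be ordered by bucket_id_cmp, as after sort() */
                return bsearch(&b, sorted, nr, sizeof(*sorted),
                               bucket_id_cmp) != NULL;
        }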
 
index f5946b4dbce2b847dde77a1bb769d00957e3b1e5..aafe4054d25def18426fa42126bc13c902d8ebd8 100644
--- a/libbcachefs/recovery.c
+++ b/libbcachefs/recovery.c
@@ -481,7 +481,7 @@ static int journal_keys_sort(struct bch_fs *c)
        struct genradix_iter iter;
        struct journal_replay *i, **_i;
        struct jset_entry *entry;
-       struct bkey_i *k, *_n;
+       struct bkey_i *k;
        struct journal_keys *keys = &c->journal_keys;
        struct journal_key *src, *dst;
        size_t nr_keys = 0;
@@ -492,7 +492,7 @@ static int journal_keys_sort(struct bch_fs *c)
                if (!i || i->ignore)
                        continue;
 
-               for_each_jset_key(k, _n, entry, &i->j)
+               for_each_jset_key(k, entry, &i->j)
                        nr_keys++;
        }
 
@@ -511,7 +511,7 @@ static int journal_keys_sort(struct bch_fs *c)
                if (!i || i->ignore)
                        continue;
 
-               for_each_jset_key(k, _n, entry, &i->j)
+               for_each_jset_key(k, entry, &i->j)
                        keys->d[keys->nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
@@ -871,7 +871,7 @@ static int verify_superblock_clean(struct bch_fs *c,
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
-                                   memcmp(k1, k2, bkey_bytes(k1)) ||
+                                   memcmp(k1, k2, bkey_bytes(&k1->k)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
index 3956043895de8b18b2f5a88d14224948d15923b7..d994c1577c747bd688f89a060d8ad5b54f59c9b8 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -543,9 +543,10 @@ do {                                                                       \
        submit_bio(bio);                                                \
 } while (0)
 
-#define kthread_wait_freezable(cond)                                   \
+#define kthread_wait(cond)                                             \
 ({                                                                     \
        int _ret = 0;                                                   \
+                                                                       \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (kthread_should_stop()) {                            \
@@ -557,7 +558,27 @@ do {                                                                       \
                        break;                                          \
                                                                        \
                schedule();                                             \
-               try_to_freeze();                                        \
+       }                                                               \
+       set_current_state(TASK_RUNNING);                                \
+       _ret;                                                           \
+})
+
+#define kthread_wait_freezable(cond)                                   \
+({                                                                     \
+       int _ret = 0;                                                   \
+       bool frozen;                                                    \
+                                                                       \
+       while (1) {                                                     \
+               set_current_state(TASK_INTERRUPTIBLE);                  \
+               if (kthread_freezable_should_stop(&frozen)) {           \
+                       _ret = -1;                                      \
+                       break;                                          \
+               }                                                       \
+                                                                       \
+               if (cond)                                               \
+                       break;                                          \
+                                                                       \
+               schedule();                                             \
        }                                                               \
        set_current_state(TASK_RUNNING);                                \
        _ret;                                                           \
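
kthread_wait() is the non-freezable half of the old macro: the same stop/condition loop, but with no try_to_freeze(), so callers decide when to enter the refrigerator; kthread_wait_freezable() keeps freezer handling via kthread_freezable_should_stop(). The copygc thread above pairs the bare wait with an explicit freeze point, verbatim from the movinggc.c hunk:

        /* wake on a pending freeze OR on work to do */
        kthread_wait(freezing(current) || c->copy_gc_enabled);

        if (unlikely(freezing(current))) {
                copygc_buckets_wait(&trans, &copygc_buckets, 0, true);
                bch2_trans_unlock(&trans);
                __refrigerator(false);
                continue;
        }
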
index b38ded00b9b052869e5ffab9873d0faffe955422..0855e698ced11a7ebdccb983759664acff067618 100644
--- a/linux/closure.c
+++ b/linux/closure.c
@@ -9,6 +9,7 @@
 #include <linux/closure.h>
 #include <linux/debugfs.h>
 #include <linux/export.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
index 3c7bdb81dff57981a44ad8b1f42347892ef1b8c1..134aeeef2c8125f160c1e1c7c7db4c798cf2e9e9 100644
--- a/linux/kthread.c
+++ b/linux/kthread.c
@@ -99,6 +99,11 @@ bool kthread_should_stop(void)
        return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
 }
 
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+       return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
+}
+
 /**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().