Update bcachefs sources to 6a20aede29 bcachefs: Fix quotas + snapshots
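
The change that threads through most of the hunks below is that the bch2_mark_* trigger hooks now take the btree ID and level of the key being marked, letting bch2_mark_pointer() derive the pointer's data type itself via bkey_ptr_data_type() instead of having it passed down from the extent trigger. As orientation, a before/after sketch of one affected prototype (taken from the hunks; illustrative only):

        /* before this commit */
        int bch2_mark_extent(struct btree_trans *trans,
                             struct bkey_s_c old, struct bkey_s_c new,
                             unsigned flags);

        /* after: the btree id and level are threaded through, so the data
         * type is computed as bkey_ptr_data_type(btree_id, level, k, p)
         * inside bch2_mark_pointer()
         */
        int bch2_mark_extent(struct btree_trans *trans,
                             enum btree_id btree_id, unsigned level,
                             struct bkey_s_c old, struct bkey_s_c new,
                             unsigned flags);
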
diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index fd3ba10bc8a74c4df2d24b9a1b4f1c08a51cd50b..bce42eef6f576001341fe94283a05ea46652a1d9 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -21,9 +21,9 @@
 #include "reflink.h"
 #include "replicas.h"
 #include "subvolume.h"
+#include "trace.h"
 
 #include <linux/preempt.h>
-#include <trace/events/bcachefs.h>
 
 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
                                              enum bch_data_type data_type,
@@ -486,6 +486,7 @@ static inline void update_cached_sectors_list(struct btree_trans *trans,
 }
 
 int bch2_mark_alloc(struct btree_trans *trans,
+                   enum btree_id btree, unsigned level,
                    struct bkey_s_c old, struct bkey_s_c new,
                    unsigned flags)
 {
@@ -775,7 +776,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
        unsigned nr_data = s->nr_blocks - s->nr_redundant;
        bool parity = ptr_idx >= nr_data;
-       enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
+       enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
        s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
        const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
@@ -810,8 +811,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
        if (ret)
                goto err;
 
-       if (data_type)
-               g->data_type = data_type;
+       g->data_type = data_type;
        g->dirty_sectors += sectors;
 
        g->stripe               = k.k->p.offset;
@@ -850,15 +850,17 @@ static int __mark_pointer(struct btree_trans *trans,
 }
 
 static int bch2_mark_pointer(struct btree_trans *trans,
+                            enum btree_id btree_id, unsigned level,
                             struct bkey_s_c k,
                             struct extent_ptr_decoded p,
-                            s64 sectors, enum bch_data_type data_type,
+                            s64 sectors,
                             unsigned flags)
 {
        u64 journal_seq = trans->journal_res.seq;
        struct bch_fs *c = trans->c;
        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
        struct bucket old, new, *g;
+       enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
        u8 bucket_data_type;
        int ret = 0;
 
@@ -904,13 +906,13 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
        if (!m) {
                bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                        (u64) p.idx);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_mark_stripe_ptr;
        }
 
-       spin_lock(&c->ec_stripes_heap_lock);
+       mutex_lock(&c->ec_stripes_heap_lock);
 
        if (!m || !m->alive) {
-               spin_unlock(&c->ec_stripes_heap_lock);
+               mutex_unlock(&c->ec_stripes_heap_lock);
                bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
                                    (u64) p.idx);
                bch2_inconsistent_error(c);
@@ -920,7 +922,7 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
        m->block_sectors[p.block] += sectors;
 
        r = m->r;
-       spin_unlock(&c->ec_stripes_heap_lock);
+       mutex_unlock(&c->ec_stripes_heap_lock);
 
        r.e.data_type = data_type;
        update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
@@ -929,6 +931,7 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 }
 
 int bch2_mark_extent(struct btree_trans *trans,
+                    enum btree_id btree_id, unsigned level,
                     struct bkey_s_c old, struct bkey_s_c new,
                     unsigned flags)
 {
@@ -961,8 +964,7 @@ int bch2_mark_extent(struct btree_trans *trans,
                if (flags & BTREE_TRIGGER_OVERWRITE)
                        disk_sectors = -disk_sectors;
 
-               ret = bch2_mark_pointer(trans, k, p, disk_sectors,
-                                       data_type, flags);
+               ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
                if (ret < 0)
                        return ret;
 
@@ -1012,6 +1014,7 @@ int bch2_mark_extent(struct btree_trans *trans,
 }
 
 int bch2_mark_stripe(struct btree_trans *trans,
+                    enum btree_id btree_id, unsigned level,
                     struct bkey_s_c old, struct bkey_s_c new,
                     unsigned flags)
 {
@@ -1031,7 +1034,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
        if (!gc) {
                struct stripe *m = genradix_ptr(&c->stripes, idx);
 
-               if (!m || (old_s && !m->alive)) {
+               if (!m) {
                        struct printbuf buf1 = PRINTBUF;
                        struct printbuf buf2 = PRINTBUF;
 
@@ -1047,13 +1050,10 @@ int bch2_mark_stripe(struct btree_trans *trans,
                }
 
                if (!new_s) {
-                       spin_lock(&c->ec_stripes_heap_lock);
                        bch2_stripes_heap_del(c, m, idx);
-                       spin_unlock(&c->ec_stripes_heap_lock);
 
                        memset(m, 0, sizeof(*m));
                } else {
-                       m->alive        = true;
                        m->sectors      = le16_to_cpu(new_s->sectors);
                        m->algorithm    = new_s->algorithm;
                        m->nr_blocks    = new_s->nr_blocks;
@@ -1063,9 +1063,10 @@ int bch2_mark_stripe(struct btree_trans *trans,
                        for (i = 0; i < new_s->nr_blocks; i++)
                                m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
 
-                       spin_lock(&c->ec_stripes_heap_lock);
-                       bch2_stripes_heap_update(c, m, idx);
-                       spin_unlock(&c->ec_stripes_heap_lock);
+                       if (!old_s)
+                               bch2_stripes_heap_insert(c, m, idx);
+                       else
+                               bch2_stripes_heap_update(c, m, idx);
                }
        } else {
                struct gc_stripe *m =
@@ -1074,7 +1075,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
                if (!m) {
                        bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                                idx);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_mark_stripe;
                }
                /*
                 * This will be wrong when we bring back runtime gc: we should
@@ -1119,6 +1120,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
 }
 
 int bch2_mark_inode(struct btree_trans *trans,
+                   enum btree_id btree_id, unsigned level,
                    struct bkey_s_c old, struct bkey_s_c new,
                    unsigned flags)
 {
@@ -1150,6 +1152,7 @@ int bch2_mark_inode(struct btree_trans *trans,
 }
 
 int bch2_mark_reservation(struct btree_trans *trans,
+                         enum btree_id btree_id, unsigned level,
                          struct bkey_s_c old, struct bkey_s_c new,
                          unsigned flags)
 {
@@ -1236,6 +1239,7 @@ fsck_err:
 }
 
 int bch2_mark_reflink_p(struct btree_trans *trans,
+                       enum btree_id btree_id, unsigned level,
                        struct bkey_s_c old, struct bkey_s_c new,
                        unsigned flags)
 {
@@ -1403,17 +1407,17 @@ static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
        bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
        struct btree_iter iter;
        struct bkey_i_alloc_v4 *a;
-       struct bpos bucket_pos;
+       struct bpos bucket;
        struct bch_backpointer bp;
        s64 sectors;
        int ret;
 
-       bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket_pos, &bp);
+       bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
        sectors = bp.bucket_len;
        if (!insert)
                sectors = -sectors;
 
-       a = bch2_trans_start_alloc_update(trans, &iter, bucket_pos);
+       a = bch2_trans_start_alloc_update(trans, &iter, bucket);
        if (IS_ERR(a))
                return PTR_ERR(a);
 
@@ -1424,7 +1428,7 @@ static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
                goto err;
 
        if (!p.ptr.cached) {
-               ret = bch2_bucket_backpointer_mod(trans, a, bp, k, insert);
+               ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
                if (ret)
                        goto err;
        }
@@ -1444,10 +1448,9 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
        struct bch_replicas_padded r;
        int ret = 0;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
-                            BTREE_ITER_INTENT|
-                            BTREE_ITER_WITH_UPDATES);
-       s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
+       s = bch2_bkey_get_mut_typed(trans, &iter,
+                       BTREE_ID_stripes, POS(0, p.ec.idx),
+                       BTREE_ITER_WITH_UPDATES, stripe);
        ret = PTR_ERR_OR_ZERO(s);
        if (unlikely(ret)) {
                bch2_trans_inconsistent_on(ret == -ENOENT, trans,
@@ -1468,10 +1471,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
                stripe_blockcount_get(&s->v, p.ec.block) +
                sectors);
 
-       ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
-       if (ret)
-               goto err;
-
        bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
        r.e.data_type = data_type;
        update_replicas_list(trans, &r.e, sectors);
@@ -1592,6 +1591,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
 
                a->v.stripe             = s.k->p.offset;
                a->v.stripe_redundancy  = s.v->nr_redundant;
+               a->v.data_type          = BCH_DATA_stripe;
        } else {
                if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
                                               a->v.stripe_redundancy != s.v->nr_redundant, trans,
@@ -1604,6 +1604,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
 
                a->v.stripe             = 0;
                a->v.stripe_redundancy  = 0;
+               a->v.data_type          = alloc_data_type(a->v, BCH_DATA_user);
        }
 
        a->v.dirty_sectors += sectors;
@@ -1744,10 +1745,9 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
        struct printbuf buf = PRINTBUF;
        int ret;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
-                            BTREE_ITER_INTENT|
-                            BTREE_ITER_WITH_UPDATES);
-       k = bch2_bkey_get_mut(trans, &iter);
+       k = bch2_bkey_get_mut_noupdate(trans, &iter,
+                       BTREE_ID_reflink, POS(0, *idx),
+                       BTREE_ITER_WITH_UPDATES);
        ret = PTR_ERR_OR_ZERO(k);
        if (ret)
                goto err;
@@ -1849,7 +1849,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
        if (IS_ERR(a))
                return PTR_ERR(a);
 
-       if (a->v.data_type && a->v.data_type != type) {
+       if (a->v.data_type && type && a->v.data_type != type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
@@ -2039,15 +2039,21 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
        struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
        unsigned long *buckets_nouse = NULL;
        bool resize = ca->bucket_gens != NULL;
-       int ret = -ENOMEM;
+       int ret;
 
        if (!(bucket_gens       = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
-                                           GFP_KERNEL|__GFP_ZERO)) ||
-           (c->opts.buckets_nouse &&
+                                           GFP_KERNEL|__GFP_ZERO))) {
+               ret = -BCH_ERR_ENOMEM_bucket_gens;
+               goto err;
+       }
+
+       if ((c->opts.buckets_nouse &&
             !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                            sizeof(unsigned long),
-                                           GFP_KERNEL|__GFP_ZERO))))
+                                           GFP_KERNEL|__GFP_ZERO)))) {
+               ret = -BCH_ERR_ENOMEM_buckets_nouse;
                goto err;
+       }
 
        bucket_gens->first_bucket = ca->mi.first_bucket;
        bucket_gens->nbuckets   = nbuckets;
@@ -2117,12 +2123,12 @@ int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 
        ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
        if (!ca->usage_base)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_usage_init;
 
        for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
                ca->usage[i] = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage[i])
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_usage_init;
        }
 
        return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);