#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
-static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
+static inline struct bio_vec *bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
- if (WARN_ON(iter->idx >= bio->bi_vcnt))
- return (struct bio_vec) { NULL };
+ if (iter->idx >= bio->bi_vcnt)
+ return NULL;
- return bvec_iter_all_peek(bio->bi_io_vec, iter);
+ return &bio->bi_io_vec[iter->idx];
}
-static inline void bio_iter_all_advance(const struct bio *bio,
- struct bvec_iter_all *iter,
- unsigned bytes)
-{
- bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);
-
- WARN_ON(iter->idx > bio->bi_vcnt ||
- (iter->idx == bio->bi_vcnt && iter->done));
-}
-
-#define bio_for_each_segment_all_continue(bvl, bio, iter) \
- for (; \
- iter.idx < bio->bi_vcnt && \
- ((bvl = bio_iter_all_peek(bio, &iter)), true); \
- bio_iter_all_advance((bio), &iter, bvl.bv_len))
-
-/*
- * drivers should _never_ use the all version - the bio may have been split
- * before it got to the driver and the driver won't own all of it
- */
-#define bio_for_each_segment_all(bvl, bio, iter) \
- for (bvec_iter_all_init(&iter); \
- iter.idx < (bio)->bi_vcnt && \
- ((bvl = bio_iter_all_peek((bio), &iter)), true); \
- bio_iter_all_advance((bio), &iter, bvl.bv_len))
+#define bio_for_each_segment_all(bvl, bio, iter) \
+ for ((iter).idx = 0; (bvl = bio_next_segment((bio), &(iter))); (iter).idx++)
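
/*
 * Illustrative usage sketch (bio_count_segment_bytes() is
 * hypothetical): bvl is now a bio_vec pointer and the loop is a plain
 * walk of bi_io_vec[], with no partial-page accounting:
 */
static inline unsigned bio_count_segment_bytes(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter;
	unsigned bytes = 0;

	bio_for_each_segment_all(bv, bio, iter)
		bytes += bv->bv_len;
	return bytes;
}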
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
unsigned bytes)
current bvec */
};
+struct bvec_iter_all {
+ int idx;
+};
+
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
-/*
- * bvec_iter_all: for advancing over individual pages in a bio, as it was when
- * it was first created:
- */
-struct bvec_iter_all {
- int idx;
- unsigned done;
-};
-
-static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
-{
- iter_all->done = 0;
- iter_all->idx = 0;
-}
-
-static inline struct bio_vec __bvec_iter_all_peek(const struct bio_vec *bvec,
- const struct bvec_iter_all *iter)
-{
- struct bio_vec bv = bvec[iter->idx];
-
- BUG_ON(iter->done >= bv.bv_len);
-
- bv.bv_offset += iter->done;
- bv.bv_len -= iter->done;
- return bv;
-}
-
-static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
- const struct bvec_iter_all *iter)
-{
- struct bio_vec bv = __bvec_iter_all_peek(bvec, iter);
-
- bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
- return bv;
-}
-
-static inline void bvec_iter_all_advance(const struct bio_vec *bvec,
- struct bvec_iter_all *iter,
- unsigned bytes)
-{
- iter->done += bytes;
-
- while (iter->done && iter->done >= bvec[iter->idx].bv_len) {
- iter->done -= bvec[iter->idx].bv_len;
- iter->idx++;
- }
-}
-
#endif /* __LINUX_BVEC_ITER_H */
posix_acl_xattr_entry a_entries[0];
} posix_acl_xattr_header;
-extern const struct xattr_handler posix_acl_access_xattr_handler;
-extern const struct xattr_handler posix_acl_default_xattr_handler;
+extern const struct xattr_handler nop_posix_acl_access;
+extern const struct xattr_handler nop_posix_acl_default;
#endif /* _POSIX_ACL_XATTR_H */
#include <string.h>
#include <asm/types.h>
+#include <stdbool.h>
-typedef struct {
- __u8 b[16];
-} uuid_le;
+#define UUID_SIZE 16
typedef struct {
- __u8 b[16];
-} uuid_be;
-
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
-((uuid_le) \
-{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
- (b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, \
- (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+ __u8 b[UUID_SIZE];
+} __uuid_t;
-#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
-((uuid_be) \
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((__uuid_t) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
((b) >> 8) & 0xff, (b) & 0xff, \
((c) >> 8) & 0xff, (c) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+static inline bool uuid_equal(const __uuid_t *u1, const __uuid_t *u2)
+{
+ return memcmp(u1, u2, sizeof(__uuid_t)) == 0;
+}
+
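
/*
 * Illustrative usage sketch (is_bcache_magic() is hypothetical):
 * UUID_INIT() takes its fields in natural byte order rather than the
 * pre-swapped order UUID_LE() required - the BCACHE_MAGIC hunk further
 * down shows the same reordering - and uuid_equal() replaces
 * open-coded memcmp()s:
 */
static inline bool is_bcache_magic(const __uuid_t *magic)
{
	__uuid_t bcache = UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,
				    0x82, 0x65, 0xf5, 0x7f,
				    0x48, 0xba, 0x6d, 0x81);

	return uuid_equal(magic, &bcache);
}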
#endif
struct format_opts {
char *label;
- uuid_le uuid;
+ __uuid_t uuid;
unsigned version;
unsigned superblock_size;
bool encrypted;
int bcachectl_open(void);
struct bchfs_handle {
- uuid_le uuid;
- int ioctl_fd;
- int sysfs_fd;
+ __uuid_t uuid;
+ int ioctl_fd;
+ int sysfs_fd;
};
void bcache_fs_close(struct bchfs_handle);
unsigned idx;
char *dev;
char *label;
- uuid_le uuid;
+ uuid_t uuid;
};
typedef DARRAY(struct dev_name) dev_names;
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
-
+ bch_err_fn(c, ret);
return ret;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
return k;
}
-static int bch2_check_alloc_key(struct btree_trans *trans,
- struct bkey_s_c alloc_k,
- struct btree_iter *alloc_iter,
- struct btree_iter *discard_iter,
- struct btree_iter *freespace_iter,
- struct btree_iter *bucket_gens_iter)
+static noinline_for_stack
+int bch2_check_alloc_key(struct btree_trans *trans,
+ struct bkey_s_c alloc_k,
+ struct btree_iter *alloc_iter,
+ struct btree_iter *discard_iter,
+ struct btree_iter *freespace_iter,
+ struct btree_iter *bucket_gens_iter)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca;
return ret;
}
-static int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
- struct bpos start,
- struct bpos *end,
- struct btree_iter *freespace_iter)
+static noinline_for_stack
+int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *freespace_iter)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca;
return ret;
}
-static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
- struct bpos start,
- struct bpos *end,
- struct btree_iter *bucket_gens_iter)
+static noinline_for_stack
+int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *bucket_gens_iter)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k;
return ret;
}
-static int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_trans *trans,
struct btree_iter *iter)
{
struct bch_fs *c = trans->c;
* valid for buckets that exist; this just checks for keys for nonexistent
* buckets.
*/
-static int bch2_check_bucket_gens_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
+static noinline_for_stack
+int bch2_check_bucket_gens_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct bkey_i_bucket_gens g;
bch2_check_bucket_gens_key(&trans, &iter, k));
err:
bch2_trans_exit(&trans);
- return ret < 0 ? ret : 0;
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
}
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
- POS_MIN, BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- bch2_check_alloc_to_lru_ref(&trans, &iter));
-
- bch2_trans_exit(&trans);
- return ret < 0 ? ret : 0;
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+ POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_alloc_to_lru_ref(&trans, &iter)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
}
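
/*
 * bch2_trans_run() wraps the bch2_trans_init()/bch2_trans_exit()
 * boilerplate that functions like the above used to open-code; it
 * expands to roughly the following (a sketch, not the verbatim
 * definition):
 *
 *	struct btree_trans trans;
 *	int _ret;
 *
 *	bch2_trans_init(&trans, (_c), 0, 0);
 *	_ret = (_do);
 *	bch2_trans_exit(&trans);
 *	_ret;
 */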
static int bch2_discard_one_bucket(struct btree_trans *trans,
ret = bch2_dev_freespace_init(c, ca, &last_updated);
if (ret) {
percpu_ref_put(&ca->ref);
+ bch_err_fn(c, ret);
return ret;
}
}
mutex_lock(&c->sb_lock);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
-
bch_verbose(c, "done initializing freespace");
}
- return ret;
+ return 0;
}
/* Bucket IO clocks: */
u64 free = max_t(s64, 0,
u.d[BCH_DATA_free].buckets
+ u.d[BCH_DATA_need_discard].buckets
- - bch2_dev_buckets_reserved(ca, RESERVE_stripe));
+ - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}
}
}
-const char * const bch2_alloc_reserves[] = {
+const char * const bch2_watermarks[] = {
#define x(t) #t,
- BCH_ALLOC_RESERVES()
+ BCH_WATERMARKS()
#undef x
NULL
};
return -1;
}
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
- switch (reserve) {
- case RESERVE_btree:
- case RESERVE_btree_movinggc:
+ switch (watermark) {
+ case BCH_WATERMARK_btree:
+ case BCH_WATERMARK_btree_copygc:
return 0;
- case RESERVE_movinggc:
+ case BCH_WATERMARK_copygc:
return OPEN_BUCKETS_COUNT / 4;
default:
return OPEN_BUCKETS_COUNT / 2;
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 bucket,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
const struct bch_alloc_v4 *a,
struct bucket_alloc_state *s,
struct closure *cl)
spin_lock(&c->freelist_lock);
- if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+ if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
if (cl)
closure_wait(&c->open_buckets_wait, cl);
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
- enum alloc_reserve reserve, u64 free_entry,
+ enum bch_watermark watermark, u64 free_entry,
struct bucket_alloc_state *s,
struct bkey_s_c freespace_k,
struct closure *cl)
}
}
- ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+ ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
if (!ob)
iter.path->preserve = false;
err:
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct bucket_alloc_state *s,
struct closure *cl)
{
s->buckets_seen++;
- ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
if (ob)
break;
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct bucket_alloc_state *s,
struct closure *cl)
{
s->buckets_seen++;
- ob = try_alloc_bucket(trans, ca, reserve,
+ ob = try_alloc_bucket(trans, ca, watermark,
alloc_cursor, s, k, cl);
if (ob) {
iter.path->preserve = false;
*/
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl,
struct bch_dev_usage *usage)
{
bool waiting = false;
again:
bch2_dev_usage_read_fast(ca, usage);
- avail = dev_buckets_free(ca, *usage, reserve);
+ avail = dev_buckets_free(ca, *usage, watermark);
if (usage->d[BCH_DATA_need_discard].buckets > avail)
bch2_do_discards(c);
closure_wake_up(&c->freelist_wait);
alloc:
ob = likely(freespace)
- ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
- : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+ ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+ : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
if (s.skipped_need_journal_commit * 2 > avail)
bch2_journal_flush_async(&c->journal, NULL);
if (!IS_ERR(ob))
trace_and_count(c, bucket_alloc, ca,
- bch2_alloc_reserves[reserve],
+ bch2_watermarks[watermark],
ob->bucket,
usage->d[BCH_DATA_free].buckets,
avail,
"");
else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
trace_and_count(c, bucket_alloc_fail, ca,
- bch2_alloc_reserves[reserve],
+ bch2_watermarks[watermark],
0,
usage->d[BCH_DATA_free].buckets,
avail,
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl)
{
struct bch_dev_usage usage;
struct open_bucket *ob;
bch2_trans_do(c, NULL, NULL, 0,
- PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+ PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark,
cl, &usage)));
return ob;
}
struct bch_dev_usage *usage)
{
u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_available(ca, RESERVE_none);
+ u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
u64 free_space_inv = free_space
? div64_u64(1ULL << 48, free_space)
: 1ULL << 48;
bool *have_cache,
unsigned flags,
enum bch_data_type data_type,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl)
{
struct bch_fs *c = trans->c;
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
percpu_ref_put(&ca->ref);
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl)
{
if (ec_open_bucket(c, ptrs))
return 0;
- h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+ h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
if (IS_ERR(h))
return PTR_ERR(h);
if (!h)
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache, bool ec,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags)
{
int i, ret = 0;
u64 avail;
bch2_dev_usage_read_fast(ca, &usage);
- avail = dev_buckets_free(ca, usage, reserve);
+ avail = dev_buckets_free(ca, usage, watermark);
if (!avail)
continue;
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *_cl)
{
ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, reserve, flags);
+ have_cache, erasure_code, watermark, flags);
if (ret)
return ret;
target,
nr_replicas, nr_effective,
have_cache,
- reserve, flags, _cl);
+ watermark, flags, _cl);
} else {
retry_blocking:
/*
*/
ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
nr_replicas, nr_effective, have_cache,
- flags, wp->data_type, reserve, cl);
+ flags, wp->data_type, watermark, cl);
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
!bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl)
{
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, erasure_code,
nr_replicas, nr_effective, have_cache,
- reserve, flags, cl);
+ watermark, flags, cl);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, false,
nr_replicas, nr_effective, have_cache,
- reserve, flags, cl);
+ watermark, flags, cl);
return ret < 0 ? ret : 0;
}
struct bch_devs_list *devs_have,
unsigned nr_replicas,
unsigned nr_replicas_required,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl,
struct write_point **wp_ret)
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve,
+ &have_cache, watermark,
flags, NULL);
if (!ret ||
bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
0, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve,
+ &have_cache, watermark,
flags, cl);
} else {
allocate_blocking:
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve,
+ &have_cache, watermark,
flags, cl);
}
alloc_done:
struct bch_fs;
struct bch_devs_List;
-extern const char * const bch2_alloc_reserves[];
+extern const char * const bch2_watermarks[];
void bch2_reset_alloc_cursors(struct bch_fs *);
long bch2_bucket_alloc_new_fs(struct bch_dev *);
struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
- enum alloc_reserve, struct closure *);
+ enum bch_watermark, struct closure *);
static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
struct open_bucket *ob)
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
struct dev_stripe_state *, struct bch_devs_mask *,
unsigned, unsigned *, bool *, unsigned,
- enum bch_data_type, enum alloc_reserve,
+ enum bch_data_type, enum bch_watermark,
struct closure *);
int bch2_alloc_sectors_start_trans(struct btree_trans *,
struct write_point_specifier,
struct bch_devs_list *,
unsigned, unsigned,
- enum alloc_reserve,
+ enum bch_watermark,
unsigned,
struct closure *,
struct write_point **);
u64 skipped_nouse;
};
-struct ec_bucket_buf;
-
-#define BCH_ALLOC_RESERVES() \
- x(btree_movinggc) \
+#define BCH_WATERMARKS() \
+ x(btree_copygc) \
x(btree) \
- x(movinggc) \
- x(none) \
+ x(copygc) \
+ x(normal) \
x(stripe)
-enum alloc_reserve {
-#define x(name) RESERVE_##name,
- BCH_ALLOC_RESERVES()
+enum bch_watermark {
+#define x(name) BCH_WATERMARK_##name,
+ BCH_WATERMARKS()
#undef x
- RESERVE_NR,
+ BCH_WATERMARK_NR,
};
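
/*
 * The BCH_WATERMARKS() x-macro keeps this enum and the name table
 * bch2_watermarks[] in sync; after preprocessing the enum reads:
 *
 *	enum bch_watermark {
 *		BCH_WATERMARK_btree_copygc,
 *		BCH_WATERMARK_btree,
 *		BCH_WATERMARK_copygc,
 *		BCH_WATERMARK_normal,
 *		BCH_WATERMARK_stripe,
 *		BCH_WATERMARK_NR,
 *	};
 */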
#define OPEN_BUCKETS_COUNT 1024
{
struct btree_iter iter;
struct bkey_s_c k;
+ int ret;
- return bch2_trans_run(c,
+ ret = bch2_trans_run(c,
for_each_btree_key_commit(&trans, iter,
BTREE_ID_backpointers, POS_MIN, 0, k,
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
bch2_check_btree_backpointer(&trans, &iter, k)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
}
struct bpos_level {
}
bch2_trans_exit(&trans);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
if (fsck_err_on(!k.k, c,
"backpointer for missing extent\n %s",
- (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf)))
- return bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+ (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
+ ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+ goto out;
+ }
out:
fsck_err:
bch2_trans_iter_exit(trans, &iter);
}
bch2_trans_exit(&trans);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
set_bkey_val_u64s(&bp_k->k, 0);
}
- return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
+ return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i, !insert);
}
static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
+#include "seqmutex.h"
#include "util.h"
#ifdef CONFIG_BCACHEFS_DEBUG
#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
+#define bch_err_fn(_c, _ret) \
+ bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret))
+#define bch_err_msg(_c, _ret, _msg) \
+ bch_err(_c, "%s(): error " _msg " %s", __func__, bch2_err_str(_ret))
+
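
/*
 * Usage sketch (bch2_foo() is hypothetical): called from inside
 * bch2_foo(),
 *
 *	bch_err_fn(c, ret);			// "bch2_foo(): error <name>"
 *	bch_err_msg(c, ret, "reading foo");	// "bch2_foo(): error reading foo <name>"
 *
 * replacing the hand-rolled "%s: ..." / __func__ format strings used
 * previously.
 */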
#define bch_verbose(c, fmt, ...) \
do { \
if ((c)->opts.verbose) \
* Committed by bch2_write_super() -> bch_fs_mi_update()
*/
struct bch_member_cpu mi;
- uuid_le uuid;
+ __uuid_t uuid;
char name[BDEVNAME_SIZE];
struct bch_sb_handle disk_sb;
/* Updated by bch2_sb_update():*/
struct {
- uuid_le uuid;
- uuid_le user_uuid;
+ __uuid_t uuid;
+ __uuid_t user_uuid;
u16 version;
u16 version_min;
} btree_write_stats[BTREE_WRITE_TYPE_NR];
/* btree_iter.c: */
- struct mutex btree_trans_lock;
+ struct seqmutex btree_trans_lock;
struct list_head btree_trans_list;
mempool_t btree_paths_pool;
mempool_t btree_trans_mem_pool;
#include <linux/uuid.h>
#include "vstructs.h"
+#ifdef __KERNEL__
+typedef uuid_t __uuid_t;
+#endif
+
#define BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
#define BCH_MIN_NR_NBUCKETS (1 << 6)
struct bch_member {
- uuid_le uuid;
+ __uuid_t uuid;
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
struct bch_sb_layout {
- uuid_le magic; /* bcachefs superblock UUID */
+ __uuid_t magic; /* bcachefs superblock UUID */
__u8 layout_type;
__u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
__u8 nr_superblocks;
__le16 version;
__le16 version_min;
__le16 pad[2];
- uuid_le magic;
- uuid_le uuid;
- uuid_le user_uuid;
+ __uuid_t magic;
+ __uuid_t uuid;
+ __uuid_t user_uuid;
__u8 label[BCH_SB_LABEL_SIZE];
__le64 offset;
__le64 seq;
*/
#define BCACHE_MAGIC \
- UUID_LE(0xf67385c6, 0x1a4e, 0xca45, \
- 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
+ UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca, \
+ 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC \
- UUID_LE(0xf67385c6, 0xce66, 0xa990, \
- 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
+ UUID_INIT(0xc68573f6, 0x66ce, 0x90a9, \
+ 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
#define BCACHEFS_STATFS_MAGIC 0xca451a4e
* this UUID.
*/
struct bch_ioctl_query_uuid {
- uuid_le uuid;
+ __uuid_t uuid;
};
#if 0
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC)
-#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \
- ((1U << KEY_TYPE_alloc)| \
- (1U << KEY_TYPE_alloc_v2)| \
- (1U << KEY_TYPE_alloc_v3)| \
- (1U << KEY_TYPE_alloc_v4)| \
- (1U << KEY_TYPE_stripe)| \
- (1U << KEY_TYPE_inode)| \
- (1U << KEY_TYPE_inode_v2)| \
- (1U << KEY_TYPE_snapshot))
-
static inline int bch2_trans_mark_key(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_i *new,
struct btree *b;
unsigned i, flags;
- if (bc->shrink.list.next)
- unregister_shrinker(&bc->shrink);
+ unregister_shrinker(&bc->shrink);
/* vfree() can allocate memory: */
flags = memalloc_nofs_save();
}
if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
break;
}
ret = PTR_ERR_OR_ZERO(cur);
if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
goto err;
}
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
-		bch_err(c, "%s: error allocating new key", __func__);
-		ret = -BCH_ERR_ENOMEM_gc_repair_key;
+		ret = -BCH_ERR_ENOMEM_gc_repair_key;
+		bch_err_msg(c, ret, "allocating new key");
goto err;
}
fsck_err:
err:
if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
false, &k, true);
- if (ret) {
- bch_err(c, "%s: error from bch2_gc_mark_key: %s",
- __func__, bch2_err_str(ret));
+ if (ret)
goto fsck_err;
- }
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
continue;
}
} else if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
break;
}
six_unlock_read(&b->c.lock);
if (ret < 0)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
printbuf_exit(&buf);
return ret;
}
: bch2_gc_btree(&trans, ids[i], initial, metadata_only);
if (ret < 0)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
bch2_trans_exit(&trans);
return ret;
if (ca)
percpu_ref_put(&ca->ref);
if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
" should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
- struct bkey_i *new = bch2_bkey_make_mut(trans, iter, k, 0);
+ struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
* allocator thread - issue wakeup in case they blocked on gc_lock:
*/
closure_wake_up(&c->freelist_wait);
+
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
percpu_up_read(&c->mark_lock);
return 0;
update:
- u = bch2_bkey_make_mut(trans, iter, k, 0);
+ u = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
struct btree_trans *pos;
- mutex_lock(&c->btree_trans_lock);
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(pos, &c->btree_trans_list, list) {
/*
* We'd much prefer to be stricter here and completely
}
list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
}
}
bch2_trans_unlock(trans);
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
closure_sync(&trans->ref);
if (s)
check_btree_paths_leaked(trans);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
- mutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- mutex_unlock(&c->btree_trans_lock);
- }
-
srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
}
INIT_LIST_HEAD(&c->btree_trans_list);
- mutex_init(&c->btree_trans_lock);
+ seqmutex_init(&c->btree_trans_lock);
ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
sizeof(struct btree_path) * nr +
static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
BUG_ON(err <= 0);
- BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+ BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
trans->restarted = err;
trans->last_restarted_ip = _THIS_IP_;
int cpu;
#endif
- if (bc->shrink.list.next)
- unregister_shrinker(&bc->shrink);
+ unregister_shrinker(&bc->shrink);
mutex_lock(&bc->lock);
lock_graph_up(g);
}
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
- closure_get(&trans->ref);
-
g->g[g->nr++] = (struct trans_waiting_for_lock) {
.trans = trans,
.node_want = trans->locking,
};
}
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+ closure_get(&trans->ref);
+ __lock_graph_down(g, trans);
+}
+
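/*
 * Refcounting discipline (sketch): every trans recorded in g->g[]
 * must hold a closure ref.  lock_graph_down() takes the ref itself;
 * __lock_graph_down() is for callers that already took it - in
 * lock_graph_descend() below, the ref is taken before the waiter's
 * wait_lock is dropped, and the "already in graph" and "graph full"
 * paths put it back.
 */
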
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
struct trans_waiting_for_lock *i;
struct trans_waiting_for_lock *i;
for (i = g->g; i < g->g + g->nr; i++)
- if (i->trans == trans)
+ if (i->trans == trans) {
+ closure_put(&trans->ref);
return break_cycle(g, cycle);
+ }
if (g->nr == ARRAY_SIZE(g->g)) {
+ closure_put(&trans->ref);
+
if (orig_trans->lock_may_not_fail)
return 0;
return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
}
- lock_graph_down(g, trans);
+ __lock_graph_down(g, trans);
return 0;
}
!lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
continue;
- ret = lock_graph_descend(&g, trans, cycle);
+ closure_get(&trans->ref);
raw_spin_unlock(&b->lock.wait_lock);
+ ret = lock_graph_descend(&g, trans, cycle);
if (ret)
return ret;
goto next;
int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, enum btree_update_flags);
int __must_check bch2_trans_update_buffered(struct btree_trans *,
- enum btree_id, struct bkey_i *);
+ enum btree_id, struct bkey_i *, bool);
void bch2_trans_commit_hook(struct btree_trans *,
struct btree_trans_commit_hook *);
KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k, unsigned flags,
+ struct bkey_s_c *k, unsigned flags,
unsigned type, unsigned min_bytes)
{
- struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, k, type, min_bytes);
+ struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
int ret;
if (IS_ERR(mut))
ret = bch2_trans_update(trans, iter, mut, flags);
if (ret)
return ERR_PTR(ret);
+
+ *k = bkey_i_to_s_c(mut);
return mut;
}
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k, unsigned flags)
+ struct bkey_s_c *k, unsigned flags)
{
return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
}
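
/*
 * Usage sketch (example_update() is hypothetical): k is now passed by
 * pointer so that on success it is repointed at the mutable copy (the
 * *k = bkey_i_to_s_c(mut) above), letting callers keep reading through
 * k after the update is queued:
 */
static int example_update(struct btree_trans *trans,
			  struct btree_iter *iter, struct bkey_s_c k)
{
	struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(new);

	if (ret)
		return ret;

	/* k now views the value that will be committed */
	return 0;
}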
struct open_buckets ob = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve;
- enum alloc_reserve alloc_reserve;
+ enum bch_watermark alloc_reserve;
int ret;
if (flags & BTREE_INSERT_USE_RESERVE) {
nr_reserve = 0;
- alloc_reserve = RESERVE_btree_movinggc;
+ alloc_reserve = BCH_WATERMARK_btree_copygc;
} else {
nr_reserve = BTREE_NODE_RESERVE;
- alloc_reserve = RESERVE_btree;
+ alloc_reserve = BCH_WATERMARK_btree;
}
mutex_lock(&c->btree_reserve_cache_lock);
return 0;
if (bch2_bkey_ops[old.k->type].atomic_trigger ==
- bch2_bkey_ops[i->k->k.type].atomic_trigger &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ bch2_bkey_ops[i->k->k.type].atomic_trigger) {
ret = bch2_mark_key(trans, i->btree_id, i->level,
old, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
if (!i->insert_trigger_run &&
!i->overwrite_trigger_run &&
bch2_bkey_ops[old.k->type].trans_trigger ==
- bch2_bkey_ops[i->k->k.type].trans_trigger &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ bch2_bkey_ops[i->k->k.type].trans_trigger) {
i->overwrite_trigger_run = true;
i->insert_trigger_run = true;
return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
struct bpos pos)
{
if (!btree_type_has_snapshots(id) ||
- pos.snapshot == U32_MAX ||
!snapshot_t(trans->c, pos.snapshot)->children[0])
return 0;
int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
enum btree_id btree,
- struct bkey_i *k)
+ struct bkey_i *k,
+ bool head)
{
- struct btree_write_buffered_key *i;
- int ret;
+ int ret, pos;
EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
- trans_for_each_wb_update(trans, i) {
- if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
- bkey_copy(&i->k, k);
- return 0;
- }
- }
-
if (!trans->wb_updates ||
trans->nr_wb_updates == trans->wb_updates_size) {
struct btree_write_buffered_key *u;
trans->wb_updates = u;
}
- trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
- .btree = btree,
- };
+ if (head) {
+ memmove(&trans->wb_updates[1],
+ &trans->wb_updates[0],
+ sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);
+ pos = 0;
+ } else {
+ pos = trans->nr_wb_updates;
+ }
- bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
+ trans->wb_updates[pos] = (struct btree_write_buffered_key) { .btree = btree, };
+ bkey_copy(&trans->wb_updates[pos].k, k);
trans->nr_wb_updates++;
-
return 0;
}
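
/*
 * The new head argument prepends the update to the transaction's write
 * buffer (the memmove above) instead of appending it; the backpointer
 * code passes head == !insert, so backpointer deletions go ahead of
 * other queued updates.
 */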
bkey_init(&k->k);
k->k.p = pos;
- return bch2_trans_update_buffered(trans, btree, k);
+ return bch2_trans_update_buffered(trans, btree, k, false);
}
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
struct btree_write_buffer *wb = &c->btree_write_buffer;
struct btree_write_buffered_key *i;
union btree_write_buffer_state old, new;
+ unsigned offset = 0;
int ret = 0;
u64 v;
EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
i->journal_seq = trans->journal_res.seq;
- i->journal_offset = trans->journal_res.offset;
+ i->journal_offset = trans->journal_res.offset + offset;
+ offset++;
}
preempt_disable();
return 0;
}
-int bch2_mark_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
return 0;
}
+int bch2_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_insert_then_overwrite(__mark_extent, trans, btree_id, level, old, new, flags);
+}
+
int bch2_mark_stripe(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
return 0;
}
-int bch2_mark_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage __percpu *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
return 0;
}
+int bch2_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_insert_then_overwrite(__mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 start, u64 end,
return ret;
}
-int bch2_mark_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
return ret;
}
+int bch2_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_insert_then_overwrite(__mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
void bch2_trans_fs_usage_revert(struct btree_trans *trans,
struct replicas_delta_list *deltas)
{
ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
a->v.gen, &a->v.data_type,
- &a->v.dirty_sectors, &a->v.cached_sectors);
+ &a->v.dirty_sectors, &a->v.cached_sectors) ?:
+ bch2_trans_update(trans, &iter, &a->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+
if (ret)
- goto err;
+ return ret;
if (!p.ptr.cached) {
ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
if (ret)
- goto err;
+ return ret;
}
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return 0;
}
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
return ret;
}
-int bch2_trans_mark_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
return ret;
}
+int bch2_trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ return trigger_run_insert_then_overwrite(__trans_mark_extent, trans, btree_id, level, old, new, flags);
+}
+
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
struct bkey_s_c_stripe s,
unsigned idx, bool deleting)
return 0;
}
-int bch2_trans_mark_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
return 0;
}
-static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ return trigger_run_insert_then_overwrite(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
+static int trans_mark_reflink_p_segment(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 *idx, unsigned flags)
{
return ret;
}
-int bch2_trans_mark_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
u64 idx, end_idx;
int ret = 0;
- if (flags & BTREE_TRIGGER_INSERT) {
- struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
-
- v->front_pad = v->back_pad = 0;
- }
-
idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
end_idx = le64_to_cpu(p.v->idx) + p.k->size +
le32_to_cpu(p.v->back_pad);
while (idx < end_idx && !ret)
- ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
-
+ ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
return ret;
}
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
+
+ v->front_pad = v->back_pad = 0;
+ }
+
+ return trigger_run_insert_then_overwrite(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
- return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+ int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
}
/* Disk reservations: */
void bch2_dev_usage_init(struct bch_dev *);
-static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
s64 reserved = 0;
- switch (reserve) {
- case RESERVE_NR:
+ switch (watermark) {
+ case BCH_WATERMARK_NR:
unreachable();
- case RESERVE_stripe:
+ case BCH_WATERMARK_stripe:
reserved += ca->mi.nbuckets >> 6;
fallthrough;
- case RESERVE_none:
+ case BCH_WATERMARK_normal:
reserved += ca->mi.nbuckets >> 6;
fallthrough;
- case RESERVE_movinggc:
+ case BCH_WATERMARK_copygc:
reserved += ca->nr_btree_reserve;
fallthrough;
- case RESERVE_btree:
+ case BCH_WATERMARK_btree:
reserved += ca->nr_btree_reserve;
fallthrough;
- case RESERVE_btree_movinggc:
+ case BCH_WATERMARK_btree_copygc:
break;
}
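
/*
 * The fallthroughs make the reservations cumulative - each less
 * critical watermark reserves everything the more critical ones do,
 * plus its own slice:
 *
 *	btree_copygc:	0
 *	btree:		nr_btree_reserve
 *	copygc:		2 * nr_btree_reserve
 *	normal:		2 * nr_btree_reserve + (nbuckets >> 6)
 *	stripe:		2 * nr_btree_reserve + 2 * (nbuckets >> 6)
 */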
static inline u64 dev_buckets_free(struct bch_dev *ca,
struct bch_dev_usage usage,
- enum alloc_reserve reserve)
+ enum bch_watermark watermark)
{
return max_t(s64, 0,
usage.d[BCH_DATA_free].buckets -
ca->nr_open_buckets -
- bch2_dev_buckets_reserved(ca, reserve));
+ bch2_dev_buckets_reserved(ca, watermark));
}
static inline u64 __dev_buckets_available(struct bch_dev *ca,
struct bch_dev_usage usage,
- enum alloc_reserve reserve)
+ enum bch_watermark watermark)
{
return max_t(s64, 0,
usage.d[BCH_DATA_free].buckets
+ usage.d[BCH_DATA_need_gc_gens].buckets
+ usage.d[BCH_DATA_need_discard].buckets
- ca->nr_open_buckets
- - bch2_dev_buckets_reserved(ca, reserve));
+ - bch2_dev_buckets_reserved(ca, watermark));
}
static inline u64 dev_buckets_available(struct bch_dev *ca,
- enum alloc_reserve reserve)
+ enum bch_watermark watermark)
{
- return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
+ return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}
/* Filesystem usage: */
int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+#define mem_trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
+({ \
+ int ret = 0; \
+ \
+ if (_new.k->type) \
+ ret = _fn(_trans, _btree_id, _level, _new, _flags & ~BTREE_TRIGGER_OVERWRITE); \
+ if (_old.k->type && !ret) \
+ ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT); \
+ ret; \
+})
+
+#define trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, _new, _flags) \
+ mem_trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, bkey_i_to_s_c(_new), _flags)
+
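/*
 * These helpers let a trigger that looks at one key at a time (the
 * __mark_extent()-style functions) run for both halves of an update:
 * first the inserted key with BTREE_TRIGGER_OVERWRITE masked off, then
 * the overwritten key with BTREE_TRIGGER_INSERT masked off.  Expansion
 * sketch for bch2_mark_extent():
 *
 *	if (new.k->type)
 *		ret = __mark_extent(trans, btree_id, level, new,
 *				    flags & ~BTREE_TRIGGER_OVERWRITE);
 *	if (old.k->type && !ret)
 *		ret = __mark_extent(trans, btree_id, level, old,
 *				    flags & ~BTREE_TRIGGER_INSERT);
 */
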
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
if (bch_chardev_major < 0)
return bch_chardev_major;
- bch_chardev_class = class_create(THIS_MODULE, "bcachefs");
+ bch_chardev_class = class_create("bcachefs");
if (IS_ERR(bch_chardev_class))
return PTR_ERR(bch_chardev_class);
bch2_compression_opt_to_type[io_opts.background_compression ?:
io_opts.compression];
if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
- m->op.alloc_reserve = RESERVE_movinggc;
+ m->op.alloc_reserve = BCH_WATERMARK_copygc;
bkey_for_each_ptr(ptrs, ptr)
percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
i->size = size;
i->ret = 0;
- bch2_trans_init(&trans, i->c, 0, 0);
+ ret = flush_buf(i);
+ if (ret)
+ return ret;
+ bch2_trans_init(&trans, i->c, 0, 0);
ret = for_each_btree_key2(&trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
- ret = flush_buf(i);
- if (ret)
- break;
-
bch2_bkey_val_to_text(&i->buf, i->c, k);
prt_newline(&i->buf);
- 0;
+ drop_locks_do(&trans, flush_buf(i));
}));
i->from = iter.pos;
+ bch2_trans_exit(&trans);
+
if (!ret)
ret = flush_buf(i);
- bch2_trans_exit(&trans);
-
return ret ?: i->ret;
}
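
/*
 * drop_locks_do() is roughly (a sketch, not the verbatim definition):
 *
 *	bch2_trans_unlock(trans);
 *	(_do) ?: bch2_trans_relock(trans);
 *
 * so flush_buf() - which can block copying to userspace - now runs
 * with btree locks dropped, and the iteration is restarted if the
 * relock fails.
 */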
return i->ret;
bch2_trans_init(&trans, i->c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
for_each_btree_node(&trans, iter, i->id, i->from, 0, b, ret) {
- ret = flush_buf(i);
- if (ret)
- break;
-
bch2_btree_node_to_text(&i->buf, i->c, b);
i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
? bpos_successor(b->key.k.p)
: b->key.k.p;
+
+ ret = drop_locks_do(&trans, flush_buf(i));
+ if (ret)
+ break;
}
bch2_trans_iter_exit(&trans, &iter);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
bch2_trans_exit(&trans);
if (!ret)
struct bkey_packed *_k =
bch2_btree_node_iter_peek(&l->iter, l->b);
- ret = flush_buf(i);
- if (ret)
- break;
-
if (bpos_gt(l->b->key.k.p, i->prev_node)) {
bch2_btree_node_to_text(&i->buf, i->c, l->b);
i->prev_node = l->b->key.k.p;
}
bch2_bfloat_to_text(&i->buf, l->b, _k);
- 0;
+ drop_locks_do(&trans, flush_buf(i));
}));
i->from = iter.pos;
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
i->ret = 0;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto unlocked;
+ }
bch2_btree_trans_to_text(&i->buf, trans);
prt_newline(&i->buf);
i->iter = trans->locking_wait.task->pid;
- }
- mutex_unlock(&c->btree_trans_lock);
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+unlocked:
if (i->buf.allocation_failure)
ret = -ENOMEM;
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
if (i->iter)
goto out;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto out;
+ }
bch2_check_for_deadlock(trans, &i->buf);
i->iter = trans->locking_wait.task->pid;
+
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
}
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
out:
if (i->buf.allocation_failure)
ret = -ENOMEM;
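
/*
 * seqmutex (new header seqmutex.h) - a mutex tagged with a sequence
 * number, so a holder can drop the lock and later detect whether
 * anyone else took it in between.  Assumed shape of the primitives
 * used above:
 *
 *	struct seqmutex {
 *		struct mutex	lock;
 *		u32		seq;	// advances with each lock cycle
 *	};
 *
 *	seqmutex_seq(lock)	   - sequence number for this hold
 *	seqmutex_relock(lock, seq) - re-take the lock, failing if the
 *				     sequence has advanced (the walk
 *				     then restarts from restart:)
 *
 * This lets flush_buf(), which can block on userspace, run without
 * btree_trans_lock held while still walking the trans list safely.
 */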
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
struct bkey_s_c_dirent dirent;
subvol_inum target;
u32 snapshot;
+ struct bkey_buf sk;
int ret;
+ bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
if (ret)
continue;
- /*
- * XXX: dir_emit() can fault and block, while we're holding
- * locks
- */
+ /* dir_emit() can fault and block: */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ dirent = bkey_i_to_s_c_dirent(sk.k);
+ bch2_trans_unlock(&trans);
+
ctx->pos = dirent.k->p.offset;
if (!dir_emit(ctx, dirent.v->d_name,
bch2_dirent_name_bytes(dirent),
goto retry;
bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(&sk, c);
return ret;
}
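
/*
 * Pattern above: bch2_bkey_buf_reassemble() copies the dirent out of
 * the btree and bch2_trans_unlock() drops btree locks before
 * dir_emit(), which may fault and block; once the transaction is
 * unlocked, the memory the original key pointed into may be reused.
 */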
ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
ec_stripe_delete(&trans, idx));
if (ret) {
- bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
break;
}
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
unsigned algo, unsigned redundancy,
- enum alloc_reserve reserve)
+ enum bch_watermark watermark)
{
struct ec_stripe_head *h;
struct bch_dev *ca;
h->target = target;
h->algo = algo;
h->redundancy = redundancy;
- h->reserve = reserve;
+ h->watermark = watermark;
rcu_read_lock();
h->devs = target_rw_devs(c, BCH_DATA_user, target);
unsigned target,
unsigned algo,
unsigned redundancy,
- enum alloc_reserve reserve)
+ enum bch_watermark watermark)
{
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
if (h->target == target &&
h->algo == algo &&
h->redundancy == redundancy &&
- h->reserve == reserve) {
+ h->watermark == watermark) {
ret = bch2_trans_mutex_lock(trans, &h->lock);
if (ret)
h = ERR_PTR(ret);
goto found;
}
- h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
+ h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
mutex_unlock(&c->ec_stripe_head_lock);
return h;
}
static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
- enum alloc_reserve reserve, struct closure *cl)
+ enum bch_watermark watermark, struct closure *cl)
{
struct bch_fs *c = trans->c;
struct bch_devs_mask devs = h->devs;
&nr_have_parity,
&have_cache, 0,
BCH_DATA_parity,
- reserve,
+ watermark,
cl);
open_bucket_for_each(c, &buckets, ob, i) {
&nr_have_data,
&have_cache, 0,
BCH_DATA_user,
- reserve,
+ watermark,
cl);
open_bucket_for_each(c, &buckets, ob, i) {
unsigned target,
unsigned algo,
unsigned redundancy,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl)
{
struct bch_fs *c = trans->c;
bool waiting = false;
int ret;
- h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
+ h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
if (!h)
bch_err(c, "no stripe head");
if (IS_ERR_OR_NULL(h))
goto alloc_existing;
/* First, try to allocate a full stripe: */
- ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
+ ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
__bch2_ec_stripe_head_reserve(trans, h);
if (!ret)
goto allocate_buf;
if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
goto err;
- if (reserve == RESERVE_movinggc) {
- ret = new_stripe_alloc_buckets(trans, h, reserve, NULL) ?:
+ if (watermark == BCH_WATERMARK_copygc) {
+ ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
__bch2_ec_stripe_head_reserve(trans, h);
if (ret)
goto err;
closure_wake_up(&c->freelist_wait);
alloc_existing:
/*
- * Retry allocating buckets, with the reserve watermark for this
+ * Retry allocating buckets, with the watermark for this
* particular write:
*/
- ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
+ ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
if (ret)
goto err;
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error reading stripes: %i", ret);
+ bch_err_fn(c, ret);
return ret;
}
list_for_each_entry(h, &c->ec_stripe_head_list, list) {
prt_printf(out, "target %u algo %u redundancy %u %s:\n",
h->target, h->algo, h->redundancy,
- bch2_alloc_reserves[h->reserve]);
+ bch2_watermarks[h->watermark]);
if (h->s)
prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
s->idx, s->nr_data, s->nr_parity,
atomic_read(&s->ref[STRIPE_REF_io]),
atomic_read(&s->ref[STRIPE_REF_stripe]),
- bch2_alloc_reserves[s->h->reserve]);
+ bch2_watermarks[s->h->watermark]);
}
mutex_unlock(&c->ec_stripe_new_lock);
}
unsigned target;
unsigned algo;
unsigned redundancy;
- enum alloc_reserve reserve;
+ enum bch_watermark watermark;
struct bch_devs_mask devs;
unsigned nr_active_devs;
void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
unsigned, unsigned, unsigned,
- enum alloc_reserve, struct closure *);
+ enum bch_watermark, struct closure *);
void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
x(0, backpointer_to_overwritten_btree_node) \
x(0, lock_fail_root_changed) \
x(0, journal_reclaim_would_deadlock) \
- x(0, fsck) \
+ x(EINVAL, fsck) \
x(BCH_ERR_fsck, fsck_fix) \
x(BCH_ERR_fsck, fsck_ignore) \
x(BCH_ERR_fsck, fsck_errors_not_fixed) \
static inline bool _bch2_err_matches(int err, int class)
{
- return err && __bch2_err_matches(err, class);
+ return err < 0 && __bch2_err_matches(err, class);
}
#define bch2_err_matches(_err, _class) \
#include <trace/events/writeback.h>
+struct folio_vec {
+ struct folio *fv_folio;
+ size_t fv_offset;
+ size_t fv_len;
+};
+
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_vec) {
+ .fv_folio = folio,
+ .fv_offset = offset,
+ .fv_len = len,
+ };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter) \
+ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
+
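/*
 * Illustrative usage sketch (bio_folio_bytes() is hypothetical): each
 * step yields the bio's overlap with one folio, so fv_len counts bytes
 * folio-by-folio:
 */
static inline size_t bio_folio_bytes(struct bio *bio)
{
	struct folio_vec fv;
	struct bvec_iter iter;
	size_t bytes = 0;

	bio_for_each_folio(fv, bio, iter)
		bytes += fv.fv_len;
	return bytes;
}
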
/*
* Use u64 for the end pos and sector helpers because if the folio covers the
* max supported range of the mapping, the start offset of the next folio
break;
f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
- if (!f)
+ if (IS_ERR_OR_NULL(f))
break;
BUG_ON(folios->nr && folio_pos(f) != pos);
static void bch2_readpages_end_io(struct bio *bio)
{
- struct bvec_iter_all iter;
- struct folio_vec fv;
+ struct folio_iter fi;
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
if (!bio->bi_status) {
- folio_mark_uptodate(fv.fv_folio);
+ folio_mark_uptodate(fi.folio);
} else {
- folio_clear_uptodate(fv.fv_folio);
- folio_set_error(fv.fv_folio);
+ folio_clear_uptodate(fi.folio);
+ folio_set_error(fi.folio);
}
- folio_unlock(fv.fv_folio);
+ folio_unlock(fi.folio);
}
bio_put(bio);
container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
- struct bvec_iter_all iter;
- struct folio_vec fv;
+ struct folio_iter fi;
unsigned i;
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
- folio_set_error(fv.fv_folio);
- mapping_set_error(fv.fv_folio->mapping, -EIO);
+ folio_set_error(fi.folio);
+ mapping_set_error(fi.folio->mapping, -EIO);
- s = __bch2_folio(fv.fv_folio);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
- s = __bch2_folio(fv.fv_folio);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_folio_all(fv, bio, iter) {
- struct bch_folio *s = __bch2_folio(fv.fv_folio);
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s = __bch2_folio(fi.folio);
if (atomic_dec_and_test(&s->write_count))
- folio_end_writeback(fv.fv_folio);
+ folio_end_writeback(fi.folio);
}
bio_put(&io->op.wbio.bio);
folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
mapping_gfp_mask(mapping));
- if (!folio)
+ if (IS_ERR_OR_NULL(folio))
goto err_unlock;
if (folio_test_uptodate(folio))
static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, we're not guaranteed that it will live for the duration of
+ * the IO:
+ */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
struct iovec *iov = dio->inline_vecs;
+ /*
+ * iov_iter has a single embedded iovec - nothing to do:
+ */
+ if (iter_is_ubuf(&dio->iter))
+ return 0;
+
+ /*
+ * We don't currently handle non-iovec iov_iters here - return an error,
+ * and we'll fall back to doing the IO synchronously:
+ */
+ if (!iter_is_iovec(&dio->iter))
+ return -1;
+
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
GFP_KERNEL);
dio->free_iov = true;
}
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
+ memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.__iov = iov;
return 0;
}
bch2_pagecache_block_put(inode);
if (dio->free_iov)
- kfree(dio->iter.iov);
+ kfree(dio->iter.__iov);
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
mutex_unlock(&inode->ei_quota_lock);
}
- if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
- struct bvec_iter_all iter;
- struct folio_vec fv;
-
- bio_for_each_folio_all(fv, bio, iter)
- folio_put(fv.fv_folio);
- }
+ bio_release_pages(bio, false);
if (unlikely(dio->op.error))
set_bit(EI_INODE_ERROR, &inode->ei_flags);
err:
dio->op.error = ret;
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct bvec_iter_all iter;
- struct folio_vec fv;
-
- bio_for_each_folio_all(fv, bio, iter)
- folio_put(fv.fv_folio);
- }
+ bio_release_pages(bio, false);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
goto out;
u64 end_pos;
folio = filemap_lock_folio(mapping, index);
- if (!folio) {
+ if (IS_ERR_OR_NULL(folio)) {
/*
* XXX: we're doing two index lookups when we end up reading the
* folio
folio = __filemap_get_folio(mapping, index,
FGP_LOCK|FGP_CREAT, GFP_KERNEL);
- if (unlikely(!folio)) {
+ if (unlikely(IS_ERR_OR_NULL(folio))) {
ret = -ENOMEM;
goto out;
}
bool ret = true;
folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
- if (!folio)
+ if (IS_ERR_OR_NULL(folio))
return true;
s = bch2_folio(folio);
cur.k->k.p.offset += cur.k->k.size;
if (have_extent) {
+ bch2_trans_unlock(&trans);
ret = bch2_fill_extent(c, info,
bkey_i_to_s_c(prev.k), 0);
if (ret)
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- if (!ret && have_extent)
+ if (!ret && have_extent) {
+ bch2_trans_unlock(&trans);
ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
FIEMAP_EXTENT_LAST);
+ }
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&cur, c);
.mmap = bch2_mmap,
.open = generic_file_open,
.fsync = bch2_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = bch2_fallocate_dispatch,
.unlocked_ioctl = bch2_fs_file_ioctl,
struct bch_inode_unpacked *inode,
u32 snapshot)
{
- struct btree_iter iter;
- int ret;
+ struct bkey_inode_buf *inode_p =
+ bch2_trans_kmalloc(trans, sizeof(*inode_p));
- bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
- SPOS(0, inode->bi_inum, snapshot),
- BTREE_ITER_INTENT);
+ if (IS_ERR(inode_p))
+ return PTR_ERR(inode_p);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_inode_write(trans, &iter, inode);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ bch2_inode_pack(inode_p, inode);
+ inode_p->inode.k.p.snapshot = snapshot;
+
+ return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
+ &inode_p->inode.k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}
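
bch2_btree_insert_nonextent() is what replaces the open-coded iterator dance here; roughly (a sketch of its shape, not the authoritative definition), it is a one-shot update that bypasses the extent path, so the packed key's snapshot field is used verbatim:

	int bch2_btree_insert_nonextent(struct btree_trans *trans,
					enum btree_id btree, struct bkey_i *k,
					enum btree_update_flags flags)
	{
		struct btree_iter iter;
		int ret;

		/* BTREE_ITER_NOT_EXTENTS: insert at k->k.p as-is, no extent splitting: */
		bch2_trans_iter_init(trans, &iter, btree, k->k.p,
				     BTREE_ITER_NOT_EXTENTS|BTREE_ITER_INTENT);
		ret   = bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, k, flags);
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}
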
static int write_inode(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);
err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
}));
}
-static int __walk_inode(struct btree_trans *trans,
- struct inode_walker *w, struct bpos pos)
+static int get_inodes_all_snapshots(struct btree_trans *trans,
+ struct inode_walker *w, u64 inum)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
u32 restart_count = trans->restart_count;
- unsigned i;
int ret;
- pos.snapshot = bch2_snapshot_equiv(c, pos.snapshot);
-
- if (pos.inode == w->cur_inum)
- goto lookup_snapshot;
+ if (w->cur_inum == inum)
+ return 0;
w->inodes.nr = 0;
- for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, pos.inode),
+ for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
- if (k.k->p.offset != pos.inode)
+ if (k.k->p.offset != inum)
break;
if (bkey_is_inode(k.k))
if (ret)
return ret;
- w->cur_inum = pos.inode;
+ w->cur_inum = inum;
w->first_this_inode = true;
if (trans_was_restarted(trans, restart_count))
return -BCH_ERR_transaction_restart_nested;
-lookup_snapshot:
- for (i = 0; i < w->inodes.nr; i++)
- if (bch2_snapshot_is_ancestor(c, pos.snapshot, w->inodes.data[i].snapshot))
+ return 0;
+}
+
+static struct inode_walker_entry *
+lookup_inode_for_snapshot(struct bch_fs *c,
+ struct inode_walker *w, u32 snapshot)
+{
+ struct inode_walker_entry *i;
+
+ snapshot = bch2_snapshot_equiv(c, snapshot);
+
+ darray_for_each(w->inodes, i)
+ if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
goto found;
- return INT_MAX;
+
+ return NULL;
found:
- BUG_ON(pos.snapshot > w->inodes.data[i].snapshot);
+ BUG_ON(snapshot > i->snapshot);
- if (pos.snapshot != w->inodes.data[i].snapshot) {
- struct inode_walker_entry e = w->inodes.data[i];
+ if (snapshot != i->snapshot) {
+ struct inode_walker_entry new = *i;
+ int ret;
- e.snapshot = pos.snapshot;
- e.count = 0;
+ new.snapshot = snapshot;
+ new.count = 0;
bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
- pos.inode, pos.snapshot, w->inodes.data[i].snapshot);
+ w->cur_inum, snapshot, i->snapshot);
- while (i && w->inodes.data[i - 1].snapshot > pos.snapshot)
+ while (i > w->inodes.data && i[-1].snapshot > snapshot)
--i;
- ret = darray_insert_item(&w->inodes, i, e);
+ ret = darray_insert_item(&w->inodes, i - w->inodes.data, new);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
return i;
}
+static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
+ struct inode_walker *w, struct bpos pos)
+{
+ int ret = get_inodes_all_snapshots(trans, w, pos.inode);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return lookup_inode_for_snapshot(trans->c, w, pos.snapshot);
+}
+
static int __get_visible_inodes(struct btree_trans *trans,
struct inode_walker *w,
struct snapshots_seen *s,
err:
fsck_err:
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
}
fsck_err:
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
typedef DARRAY(struct extent_end) extent_ends;
+static int get_print_extent(struct btree_trans *trans, struct bpos pos, struct printbuf *out)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_extents, pos,
+ BTREE_ITER_SLOTS|
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_NOT_EXTENTS);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ bch2_bkey_val_to_text(out, trans->c, k);
+ bch2_trans_iter_exit(trans, &iter);
+ return 0;
+}
+
static int check_overlapping_extents(struct btree_trans *trans,
struct snapshots_seen *seen,
extent_ends *extent_ends,
i->snapshot, &i->seen))
continue;
- if (fsck_err_on(i->offset > bkey_start_offset(k.k), c,
- "overlapping extents: extent in snapshot %u ends at %llu overlaps with\n%s",
- i->snapshot,
- i->offset,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (i->offset <= bkey_start_offset(k.k))
+ continue;
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "overlapping extents:\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ prt_str(&buf, "\n ");
+
+ ret = get_print_extent(trans, SPOS(k.k->p.inode, i->offset, i->snapshot), &buf);
+ if (ret)
+ break;
+
+ if (fsck_err(c, "%s", buf.buf)) {
struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
bkey_reassemble(update, k);
- ret = bch2_trans_update_extent(trans, iter, update, 0);
+ ret = bch2_trans_update_extent(trans, iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
if (ret)
goto err;
}
if (ret)
goto err;
- ret = __walk_inode(trans, inode, equiv);
- if (ret < 0)
+ i = walk_inode(trans, inode, equiv);
+ ret = PTR_ERR_OR_ZERO(i);
+ if (ret)
goto err;
- if (fsck_err_on(ret == INT_MAX, c,
+ if (fsck_err_on(!i, c,
"extent in missing inode:\n %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
goto out;
}
- if (ret == INT_MAX) {
- ret = 0;
+ if (!i)
goto out;
- }
-
- i = inode->inodes.data + ret;
- ret = 0;
if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
!S_ISLNK(i->inode.bi_mode), c,
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
}
fsck_err:
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
BUG_ON(!iter->path->should_be_locked);
- ret = __walk_inode(trans, dir, equiv);
+ i = walk_inode(trans, dir, equiv);
+ ret = PTR_ERR_OR_ZERO(i);
if (ret < 0)
goto err;
*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
dir->first_this_inode = false;
- if (fsck_err_on(ret == INT_MAX, c,
+ if (fsck_err_on(!i, c,
"dirent in nonexisting directory:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
goto out;
}
- if (ret == INT_MAX) {
- ret = 0;
+ if (!i)
goto out;
- }
-
- i = dir->inodes.data + ret;
- ret = 0;
if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
"dirent in non directory inode type %s:\n%s",
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
inode_walker_exit(&target);
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
struct inode_walker *inode)
{
struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
int ret;
ret = check_key_has_snapshot(trans, iter, k);
if (ret)
return ret;
- ret = __walk_inode(trans, inode, k.k->p);
- if (ret < 0)
+ i = walk_inode(trans, inode, k.k->p);
+ ret = PTR_ERR_OR_ZERO(i);
+ if (ret)
return ret;
if (inode->first_this_inode)
*hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
inode->first_this_inode = false;
- if (fsck_err_on(ret == INT_MAX, c,
+ if (fsck_err_on(!i, c,
"xattr for missing inode %llu",
k.k->p.inode))
return bch2_btree_delete_at(trans, iter, 0);
- if (ret == INT_MAX)
+ if (!i)
return 0;
- ret = 0;
-
ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
noinline_for_stack
static int check_root(struct bch_fs *c)
{
+ int ret;
+
bch_verbose(c, "checking root directory");
- return bch2_trans_do(c, NULL, NULL,
+ ret = bch2_trans_do(c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
check_root_trans(&trans));
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
}
struct pathbuf_entry {
}
fsck_err:
if (ret)
- bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
break;
}
bch2_trans_iter_exit(&trans, &iter);
-
+ bch2_trans_exit(&trans);
darray_exit(&path);
- bch2_trans_exit(&trans);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
kvfree(links.d);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
noinline_for_stack
static int fix_reflink_p(struct bch_fs *c)
{
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch_verbose(c, "fixing reflink_p keys");
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_extents, POS_MIN,
+ BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ fix_reflink_p_key(&trans, &iter, k)));
- ret = for_each_btree_key_commit(&trans, iter,
- BTREE_ID_extents, POS_MIN,
- BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
- NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- fix_reflink_p_key(&trans, &iter, k));
-
- bch2_trans_exit(&trans);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
struct bvec_iter_all iter;
- struct bio_vec bv;
+ struct bio_vec *bv;
bio_for_each_segment_all(bv, bio, iter)
- if (bv.bv_page != ZERO_PAGE(0))
- mempool_free(bv.bv_page, &c->bio_bounce_pages);
+ if (bv->bv_page != ZERO_PAGE(0))
+ mempool_free(bv->bv_page, &c->bio_bounce_pages);
bio->bi_vcnt = 0;
}
&devs_have,
opts.data_replicas,
opts.data_replicas,
- RESERVE_none, 0, &cl, &wp);
+ BCH_WATERMARK_normal, 0, &cl, &wp);
if (ret) {
bch2_trans_unlock(trans);
closure_sync(&cl);
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
- return op->alloc_reserve == RESERVE_movinggc
+ return op->alloc_reserve == BCH_WATERMARK_copygc
? op->c->copygc_wq
: op->c->btree_update_wq;
}
op->compression_type = bch2_compression_opt_to_type[opts.compression];
op->nr_replicas = 0;
op->nr_replicas_required = c->opts.data_replicas_required;
- op->alloc_reserve = RESERVE_none;
+ op->alloc_reserve = BCH_WATERMARK_normal;
op->incompressible = 0;
op->open_buckets.nr = 0;
op->devs_have.nr = 0;
break;
}
} else {
- ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
+ ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
ret = PTR_ERR_OR_ZERO(ob[nr_got]);
if (ret)
break;
}
if (ret)
- bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
unlock:
up_write(&c->state_lock);
return ret;
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
unsigned nr;
+ int ret;
- if (dynamic_fault("bcachefs:add:journal_alloc"))
- return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ if (dynamic_fault("bcachefs:add:journal_alloc")) {
+ ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ goto err;
+ }
/* 1/128th of the device by default: */
nr = ca->mi.nbuckets >> 7;
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
- return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+ ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+err:
+ if (ret)
+ bch_err_fn(ca, ret);
+ return ret;
}
/* startup/shutdown: */
EBUG_ON(lru_pos_time(k->k.p) != time);
EBUG_ON(k->k.p.offset != dev_bucket);
- return bch2_trans_update_buffered(trans, BTREE_ID_lru, k);
+ return bch2_trans_update_buffered(trans, BTREE_ID_lru, k,
+ key_type == KEY_TYPE_deleted);
}
int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
int bch2_check_lrus(struct bch_fs *c)
{
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bpos last_flushed_pos = POS_MIN;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- ret = for_each_btree_key_commit(&trans, iter,
- BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos));
-
- bch2_trans_exit(&trans);
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos)));
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
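
bch2_trans_run() is what lets the explicit bch2_trans_init()/bch2_trans_exit() pair disappear in conversions like the one above; roughly (a sketch, not the exact definition), it is a statement expression that scopes the transaction around a single expression:

	#define bch2_trans_run(_c, _do)					\
	({								\
		struct btree_trans trans;				\
		int _ret;						\
									\
		bch2_trans_init(&trans, (_c), 0, 0);			\
		_ret = (_do);						\
		bch2_trans_exit(&trans);				\
		_ret;							\
	})
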
if (!bch2_bkey_has_device_c(k, dev_idx))
return 0;
- n = bch2_bkey_make_mut(trans, iter, k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
bch2_trans_iter_exit(trans, &iter);
if (ret) {
- bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "looking up alloc key");
goto err;
}
ret = bch2_btree_write_buffer_flush(trans);
if (ret) {
- bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "flushing btree write buffer");
goto err;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
bch2_btree_interior_updates_flush(c);
mutex_unlock(&c->sb_lock);
}
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
for_each_rw_member(ca, c, dev_idx) {
struct bch_dev_usage usage = bch2_dev_usage_read(ca);
- fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
+ fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
ca->mi.bucket_size) >> 1);
fragmented = 0;
}
move_buckets_wait(&trans, &ctxt, &move_buckets, true);
+ rhashtable_destroy(&move_buckets.table);
bch2_trans_exit(&trans);
bch2_moving_ctxt_exit(&ctxt);
for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
bch2_fs_quota_read_inode(&trans, &iter, k));
- if (ret)
- bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
bch2_trans_exit(&trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
bch2_journal_log_msg(c, "journal replay finished");
err:
kvfree(keys_sorted);
+
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
root_tree.k.p.offset = 1;
root_tree.v.master_subvol = cpu_to_le32(1);
root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
- ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees,
- &root_tree.k_i,
- NULL, NULL, 0);
bkey_snapshot_init(&root_snapshot.k_i);
root_snapshot.k.p.offset = U32_MAX;
root_snapshot.v.tree = cpu_to_le32(1);
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
- ret = bch2_btree_insert(c, BTREE_ID_snapshots,
- &root_snapshot.k_i,
- NULL, NULL, 0);
- if (ret)
- return ret;
-
bkey_subvolume_init(&root_volume.k_i);
root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
root_volume.v.flags = 0;
root_volume.v.snapshot = cpu_to_le32(U32_MAX);
root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
- ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
- &root_volume.k_i,
- NULL, NULL, 0);
+ ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees,
+ &root_tree.k_i,
+ NULL, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_snapshots,
+ &root_snapshot.k_i,
+ NULL, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_subvolumes,
+ &root_volume.k_i,
+ NULL, NULL, 0);
if (ret)
- return ret;
-
- return 0;
+ bch_err_fn(c, ret);
+ return ret;
}
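
The three inserts are now chained with GNU C's a ?: b, which evaluates a once and yields it if nonzero, only evaluating b otherwise - so the chain stops at the first failing insert and returns its error. A minimal illustration (hypothetical helpers):

	static int insert_a(void) { return 0; }
	static int insert_b(void) { return -5; }	/* first failure wins */
	static int insert_c(void) { return 0; }		/* never evaluated */

	static int chain(void)
	{
		return insert_a() ?: insert_b() ?: insert_c();	/* yields -5 */
	}
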
-static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
+static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
struct btree_iter iter;
struct bkey_s_c k;
return ret;
}
+/* set bi_subvol on root inode */
+noinline_for_stack
+static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
+{
+ int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
+ __bch2_fs_upgrade_for_subvolumes(&trans));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
int bch2_fs_recovery(struct bch_fs *c)
{
- const char *err = "cannot allocate memory";
struct bch_sb_field_clean *clean = NULL;
struct jset *last_journal_entry = NULL;
u64 last_seq, blacklist_seq, journal_seq;
goto err;
}
- if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
- bch_info(c, "alloc_v2 feature bit not set, fsck required");
- c->opts.fsck = true;
- c->opts.fix_errors = FSCK_OPT_YES;
- }
-
if (!c->opts.nochanges) {
if (c->sb.version < bcachefs_metadata_required_upgrade_below) {
bch_info(c, "version %s (%u) prior to %s (%u), upgrade and fsck required",
goto err;
bch_verbose(c, "starting alloc read");
- err = "error reading allocation information";
-
down_read(&c->gc_lock);
ret = c->sb.version < bcachefs_metadata_version_bucket_gens
? bch2_alloc_read(c)
: bch2_bucket_gens_read(c);
up_read(&c->gc_lock);
-
if (ret)
goto err;
bch_verbose(c, "alloc read done");
bch_verbose(c, "starting stripes_read");
- err = "error reading stripes";
ret = bch2_stripes_read(c);
if (ret)
goto err;
bch_verbose(c, "stripes_read done");
if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
- err = "error creating root snapshot node";
ret = bch2_fs_initialize_subvolumes(c);
if (ret)
goto err;
}
bch_verbose(c, "reading snapshots table");
- err = "error reading snapshots table";
ret = bch2_fs_snapshots_start(c);
if (ret)
goto err;
bool metadata_only = c->opts.norecovery;
bch_info(c, "checking allocations");
- err = "error checking allocations";
ret = bch2_gc(c, true, metadata_only);
if (ret)
goto err;
set_bit(BCH_FS_MAY_GO_RW, &c->flags);
bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
- err = "journal replay failed";
ret = bch2_journal_replay(c, last_seq, blacklist_seq - 1);
if (ret)
goto err;
bch_info(c, "journal replay done");
bch_info(c, "checking need_discard and freespace btrees");
- err = "error checking need_discard and freespace btrees";
ret = bch2_check_alloc_info(c);
if (ret)
goto err;
set_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags);
bch_info(c, "checking lrus");
- err = "error checking lrus";
ret = bch2_check_lrus(c);
if (ret)
goto err;
set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
bch_info(c, "checking backpointers to alloc keys");
- err = "error checking backpointers to alloc keys";
ret = bch2_check_btree_backpointers(c);
if (ret)
goto err;
bch_verbose(c, "done checking backpointers to alloc keys");
bch_info(c, "checking backpointers to extents");
- err = "error checking backpointers to extents";
ret = bch2_check_backpointers_to_extents(c);
if (ret)
goto err;
bch_verbose(c, "done checking backpointers to extents");
bch_info(c, "checking extents to backpointers");
- err = "error checking extents to backpointers";
ret = bch2_check_extents_to_backpointers(c);
if (ret)
goto err;
set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
bch_info(c, "checking alloc to lru refs");
- err = "error checking alloc to lru refs";
ret = bch2_check_alloc_to_lru_refs(c);
if (ret)
goto err;
set_bit(BCH_FS_MAY_GO_RW, &c->flags);
bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
- err = "journal replay failed";
ret = bch2_journal_replay(c, last_seq, blacklist_seq - 1);
if (ret)
goto err;
bch_info(c, "journal replay done");
}
- err = "error initializing freespace";
ret = bch2_fs_freespace_init(c);
if (ret)
goto err;
if (c->sb.version < bcachefs_metadata_version_bucket_gens &&
c->opts.version_upgrade) {
bch_info(c, "initializing bucket_gens");
- err = "error initializing bucket gens";
ret = bch2_bucket_gens_init(c);
if (ret)
goto err;
}
if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
- /* set bi_subvol on root inode */
- err = "error upgrade root inode for subvolumes";
- ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
- bch2_fs_upgrade_for_subvolumes(&trans));
+ ret = bch2_fs_upgrade_for_subvolumes(c);
if (ret)
goto err;
}
if (c->opts.fsck) {
- bch_info(c, "starting fsck");
- err = "error in fsck";
ret = bch2_fsck_full(c);
if (ret)
goto err;
bch_verbose(c, "fsck done");
} else if (!c->sb.clean) {
bch_verbose(c, "checking for deleted inodes");
- err = "error in recovery";
ret = bch2_fsck_walk_inodes_only(c);
if (ret)
goto err;
bch2_move_stats_init(&stats, "recovery");
bch_info(c, "scanning for old btree nodes");
- ret = bch2_fs_read_write(c);
- if (ret)
- goto err;
-
- ret = bch2_scan_old_btree_nodes(c, &stats);
+ ret = bch2_fs_read_write(c) ?:
+ bch2_scan_old_btree_nodes(c, &stats);
if (ret)
goto err;
bch_info(c, "scanning for old btree nodes done");
}
if (ret)
- bch_err(c, "Error in recovery: %s (%s)", err, bch2_err_str(ret));
+ bch_err_fn(c, ret);
else
bch_verbose(c, "ret %s", bch2_err_str(ret));
return ret;
struct bch_inode_unpacked root_inode, lostfound_inode;
struct bkey_inode_buf packed_inode;
struct qstr lostfound = QSTR("lost+found");
- const char *err = "cannot allocate memory";
struct bch_dev *ca;
unsigned i;
int ret;
for_each_online_member(ca, c, i)
bch2_dev_usage_init(ca);
- err = "unable to allocate journal buckets";
for_each_online_member(ca, c, i) {
ret = bch2_dev_journal_alloc(ca);
if (ret) {
bch2_fs_journal_start(&c->journal, 1);
bch2_journal_set_replay_done(&c->journal);
- err = "error going read-write";
ret = bch2_fs_read_write_early(c);
if (ret)
goto err;
* btree updates
*/
bch_verbose(c, "marking superblocks");
- err = "error marking superblock and journal";
for_each_member_device(ca, c, i) {
ret = bch2_trans_mark_dev_sb(c, ca);
if (ret) {
ca->new_fs_bucket_idx = 0;
}
- bch_verbose(c, "initializing freespace");
- err = "error initializing freespace";
ret = bch2_fs_freespace_init(c);
if (ret)
goto err;
- err = "error creating root snapshot node";
ret = bch2_fs_initialize_subvolumes(c);
if (ret)
goto err;
bch_verbose(c, "reading snapshots table");
- err = "error reading snapshots table";
ret = bch2_fs_snapshots_start(c);
if (ret)
goto err;
bch2_inode_pack(&packed_inode, &root_inode);
packed_inode.inode.k.p.snapshot = U32_MAX;
- err = "error creating root directory";
ret = bch2_btree_insert(c, BTREE_ID_inodes,
&packed_inode.inode.k_i,
NULL, NULL, 0);
- if (ret)
+ if (ret) {
+ bch_err_msg(c, ret, "creating root directory");
goto err;
+ }
bch2_inode_init_early(c, &lostfound_inode);
- err = "error creating lost+found";
ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_create_trans(&trans,
BCACHEFS_ROOT_SUBVOL_INUM,
0, 0, S_IFDIR|0700, 0,
NULL, NULL, (subvol_inum) { 0 }, 0));
if (ret) {
- bch_err(c, "error creating lost+found");
+ bch_err_msg(c, ret, "creating lost+found");
goto err;
}
goto err;
}
- err = "error writing first journal entry";
ret = bch2_journal_flush(&c->journal);
- if (ret)
+ if (ret) {
+ bch_err_msg(c, ret, "writing first journal entry");
goto err;
+ }
mutex_lock(&c->sb_lock);
SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
return 0;
err:
- pr_err("Error initializing new filesystem: %s (%s)", err, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
+static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *flags)
+{
+ if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
+ new->k.type = KEY_TYPE_deleted;
+ new->k.size = 0;
+ set_bkey_val_u64s(&new->k, 0);
+ *flags &= ~BTREE_TRIGGER_INSERT;
+ }
+}
+
int bch2_trans_mark_reflink_v(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
- struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);
-
- if (!r->v.refcount) {
- r->k.type = KEY_TYPE_deleted;
- r->k.size = 0;
- set_bkey_val_u64s(&r->k, 0);
- return 0;
- }
- }
+ check_indirect_extent_deleting(new, &flags);
return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
}
}
void bch2_indirect_inline_data_to_text(struct printbuf *out,
- struct bch_fs *c, struct bkey_s_c k)
+ struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
unsigned datalen = bkey_inline_data_bytes(k.k);
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
- struct bkey_i_indirect_inline_data *r =
- bkey_i_to_indirect_inline_data(new);
-
- if (!r->v.refcount) {
- r->k.type = KEY_TYPE_deleted;
- r->k.size = 0;
- set_bkey_val_u64s(&r->k, 0);
- }
- }
+ check_indirect_extent_deleting(new, &flags);
return 0;
}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SEQMUTEX_H
+#define _BCACHEFS_SEQMUTEX_H
+
+#include <linux/mutex.h>
+
+struct seqmutex {
+ struct mutex lock;
+ u32 seq;
+};
+
+#define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
+
+static inline bool seqmutex_trylock(struct seqmutex *lock)
+{
+ return mutex_trylock(&lock->lock);
+}
+
+static inline void seqmutex_lock(struct seqmutex *lock)
+{
+ mutex_lock(&lock->lock);
+ lock->seq++;
+}
+
+static inline void seqmutex_unlock(struct seqmutex *lock)
+{
+ mutex_unlock(&lock->lock);
+}
+
+/* Sample the sequence number; only meaningful while the lock is held: */
+static inline u32 seqmutex_seq(struct seqmutex *lock)
+{
+ return lock->seq;
+}
+
+static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
+{
+ if (lock->seq != seq || !mutex_trylock(&lock->lock))
+ return false;
+
+ if (lock->seq != seq) {
+ mutex_unlock(&lock->lock);
+ return false;
+ }
+
+ return true;
+}
+
+#endif /* _BCACHEFS_SEQMUTEX_H */
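
A sketch of the intended seqmutex pattern (the list and helpers here are hypothetical, and a real user would also need its own reference on each element before dropping the lock): sample the sequence while holding the lock, drop it to do work that may sleep, then resume iterating only if seqmutex_relock() confirms nobody else took the lock in the interim:

	restart:
		seqmutex_lock(&c->trans_lock);
		list_for_each_entry(trans, &c->trans_list, list) {
			u32 seq = seqmutex_seq(&c->trans_lock);

			seqmutex_unlock(&c->trans_lock);

			do_blocking_work(trans);	/* safe to sleep: lock not held */

			if (!seqmutex_relock(&c->trans_lock, seq))
				goto restart;		/* lock was retaken: list may have changed */
		}
		seqmutex_unlock(&c->trans_lock);
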
if (ret)
goto err;
- u = bch2_bkey_make_mut_typed(trans, iter, k, 0, snapshot_tree);
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
goto err;
return ret;
if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
- u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
+ u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u) ?:
snapshot_tree_create(trans, root_id,
bch2_snapshot_tree_oldest_subvol(c, root_id),
}
if (s->k->p.snapshot != root_id) {
- u = bch2_bkey_make_mut_typed(trans, iter, s->s_c, 0, snapshot);
+ u = bch2_bkey_make_mut_typed(trans, iter, &s->s_c, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
goto err;
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_snapshot(&trans, &iter, k)));
if (ret)
- bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
"subvolume %llu is not set as snapshot but is not master subvolume",
k.k->p.offset)) {
struct bkey_i_subvolume *s =
- bch2_bkey_make_mut_typed(trans, iter, subvol.s_c, 0, subvolume);
+ bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
ret = PTR_ERR_OR_ZERO(s);
if (ret)
return ret;
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_subvol(&trans, &iter, k)));
if (ret)
- bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
-
+ bch_err_fn(c, ret);
return ret;
}
bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
bch2_snapshot_set_equiv(&trans, k)));
if (ret)
- bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
err:
darray_exit(&deleted);
bch2_trans_exit(&trans);
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
return 0;
- s = bch2_bkey_make_mut_typed(trans, iter, k, 0, subvolume);
+ s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
ret = PTR_ERR_OR_ZERO(s);
if (ret)
return ret;
u64 offset, prev_offset, max_sectors;
unsigned i;
- if (uuid_le_cmp(layout->magic, BCACHE_MAGIC) &&
- uuid_le_cmp(layout->magic, BCHFS_MAGIC)) {
+ if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
+ !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
prt_printf(out, "Not a bcachefs superblock layout");
return -BCH_ERR_invalid_sb_layout;
}
return -BCH_ERR_invalid_sb_block_size;
}
- if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le))) {
+ if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
prt_printf(out, "Bad user UUID (got zeroes)");
return -BCH_ERR_invalid_sb_uuid;
}
- if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le))) {
+ if (bch2_is_zero(sb->uuid.b, sizeof(sb->uuid))) {
prt_printf(out, "Bad intenal UUID (got zeroes)");
return -BCH_ERR_invalid_sb_uuid;
}
return ret;
}
- if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC) &&
- uuid_le_cmp(sb->sb->magic, BCHFS_MAGIC)) {
+ if (!uuid_equal(&sb->sb->magic, &BCACHE_MAGIC) &&
+ !uuid_equal(&sb->sb->magic, &BCHFS_MAGIC)) {
prt_printf(err, "Not a bcachefs superblock");
return -BCH_ERR_invalid_sb_magic;
}
static inline bool bch2_member_exists(struct bch_member *m)
{
- return !bch2_is_zero(m->uuid.b, sizeof(uuid_le));
+ return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}
static inline bool bch2_dev_exists(struct bch_sb *sb,
? BCH_MEMBER_DURABILITY(mi) - 1
: 1,
.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
- .valid = !bch2_is_zero(mi->uuid.b, sizeof(uuid_le)),
+ .valid = bch2_member_exists(mi),
};
}
return c;
}
-static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
+static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
struct bch_fs *c;
lockdep_assert_held(&bch_fs_list_lock);
list_for_each_entry(c, &bch_fs_list, list)
- if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid_le)))
+ if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
return c;
return NULL;
}
-struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
+struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
struct bch_fs *c;
le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
struct bch_sb_field_members *mi = bch2_sb_get_members(newest);
- if (uuid_le_cmp(fs->uuid, sb->uuid))
+ if (!uuid_equal(&fs->uuid, &sb->uuid))
return -BCH_ERR_device_not_a_member_of_filesystem;
if (!bch2_dev_exists(newest, mi, sb->dev_idx))
}
struct bch_fs *bch2_dev_to_fs(dev_t);
-struct bch_fs *bch2_uuid_to_fs(uuid_le);
+struct bch_fs *bch2_uuid_to_fs(__uuid_t);
bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
enum bch_member_state, int);
{
struct btree_trans *trans;
- mutex_lock(&c->btree_trans_lock);
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);
six_lock_wakeup_all(&b->lock);
}
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
}
SHOW(bch2_fs)
prt_printf(out, "reserves:");
prt_newline(out);
- for (i = 0; i < RESERVE_NR; i++) {
- prt_str(out, bch2_alloc_reserves[i]);
+ for (i = 0; i < BCH_WATERMARK_NR; i++) {
+ prt_str(out, bch2_watermarks[i]);
prt_tab(out);
prt_u64(out, bch2_dev_buckets_reserved(ca, i));
prt_tab_rjust(out);
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
- bch_err(c, "%s(): update error in: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "update error");
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "delete error (first)");
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "delete error (second)");
goto err;
}
err:
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
- bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "update error");
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "delete error");
goto err;
}
err:
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "insert error");
goto err;
}
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating backwards");
goto err;
}
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "insert error");
goto err;
}
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating backwards");
goto err;
}
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "insert error");
goto err;
}
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
0;
}));
if (ret < 0) {
- bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards by slots");
goto err;
}
ret = 0;
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "insert error");
goto err;
}
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
0;
}));
if (ret) {
- bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "error iterating forwards by slots");
goto err;
}
ret = 0;
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret)
- bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
ret = test_snapshot_filter(c, snapids[0], snapids[1]);
if (ret) {
- bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "from test_snapshot_filter");
return ret;
}
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
+ bch_err_msg(trans->c, ret, "lookup error");
if (ret)
return ret;
#include <linux/uuid.h>
-static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
-{
- return memcmp(&u1, &u2, sizeof(uuid_le));
-}
-
#endif /* _BCACHEFS_UTIL_H */
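
The removed helper is superseded by uuid_equal() from <linux/uuid.h>, which is roughly:

	/* True on match - note the inverted sense vs. the old memcmp-style
	 * uuid_le_cmp(), hence the added '!' at the call sites above: */
	static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
	{
		return memcmp(u1, u2, sizeof(uuid_t)) == 0;
	}
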
const struct xattr_handler *bch2_xattr_handlers[] = {
&bch_xattr_user_handler,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
+ &nop_posix_acl_access,
+ &nop_posix_acl_default,
#endif
&bch_xattr_trusted_handler,
&bch_xattr_security_handler,
static const struct xattr_handler *bch_xattr_handler_map[] = {
[KEY_TYPE_XATTR_INDEX_USER] = &bch_xattr_user_handler,
[KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS] =
- &posix_acl_access_xattr_handler,
+ &nop_posix_acl_access,
[KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
+ &nop_posix_acl_default,
[KEY_TYPE_XATTR_INDEX_TRUSTED] = &bch_xattr_trusted_handler,
[KEY_TYPE_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler,
};
void bio_free_pages(struct bio *bio)
{
struct bvec_iter_all iter;
- struct bio_vec bvec;
+ struct bio_vec *bvec;
bio_for_each_segment_all(bvec, bio, iter)
- __free_page(bvec.bv_page);
+ __free_page(bvec->bv_page);
}
void bio_advance(struct bio *bio, unsigned bytes)
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
-const struct xattr_handler posix_acl_access_xattr_handler = {
+const struct xattr_handler nop_posix_acl_access = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = ACL_TYPE_ACCESS,
};
-const struct xattr_handler posix_acl_default_xattr_handler = {
+const struct xattr_handler nop_posix_acl_default = {
.name = XATTR_NAME_POSIX_ACL_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
};