X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fjournal.h;h=c85d01cf49484984d08d20a2159f84b2506f96a1;hb=97c86db4f286fef4c7c18b5b96940e64c97e31b4;hp=30de6d96188ed38012ab6a556e0b5105cc1164ba;hpb=05408b6f8fea54bf53e68a4ef24291214970f6d0;p=bcachefs-tools-debian diff --git a/libbcachefs/journal.h b/libbcachefs/journal.h index 30de6d9..c85d01c 100644 --- a/libbcachefs/journal.h +++ b/libbcachefs/journal.h @@ -29,8 +29,8 @@ * * Synchronous updates are specified by passing a closure (@flush_cl) to * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter - * down to the journalling code. That closure will will wait on the journal - * write to complete (via closure_wait()). + * down to the journalling code. That closure will wait on the journal write to + * complete (via closure_wait()). * * If the index update wasn't synchronous, the journal entry will be * written out after 10 ms have elapsed, by default (the delay_ms field @@ -127,11 +127,6 @@ static inline struct journal_buf *journal_cur_buf(struct journal *j) return j->buf + j->reservations.idx; } -static inline struct journal_buf *journal_prev_buf(struct journal *j) -{ - return j->buf + !j->reservations.idx; -} - /* Sequence number of oldest dirty journal entry */ static inline u64 journal_last_seq(struct journal *j) @@ -141,34 +136,33 @@ static inline u64 journal_last_seq(struct journal *j) static inline u64 journal_cur_seq(struct journal *j) { - BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq)); + EBUG_ON(j->pin.back - 1 != atomic64_read(&j->seq)); return j->pin.back - 1; } -u64 bch2_inode_journal_seq(struct journal *, u64); +static inline u64 journal_last_unwritten_seq(struct journal *j) +{ + return j->seq_ondisk + 1; +} static inline int journal_state_count(union journal_res_state s, int idx) { - return idx == 0 ? 
s.buf0_count : s.buf1_count;
+	switch (idx) {
+	case 0: return s.buf0_count;
+	case 1: return s.buf1_count;
+	case 2: return s.buf2_count;
+	case 3: return s.buf3_count;
+	}
+	BUG();
 }
 
 static inline void journal_state_inc(union journal_res_state *s)
 {
 	s->buf0_count += s->idx == 0;
 	s->buf1_count += s->idx == 1;
-}
-
-static inline void bch2_journal_set_has_inode(struct journal *j,
-					      struct journal_res *res,
-					      u64 inum)
-{
-	struct journal_buf *buf = &j->buf[res->idx];
-	unsigned long bit = hash_64(inum, ilog2(sizeof(buf->has_inode) * 8));
-
-	/* avoid atomic op if possible */
-	if (unlikely(!test_bit(bit, buf->has_inode)))
-		set_bit(bit, buf->has_inode);
+	s->buf2_count += s->idx == 2;
+	s->buf3_count += s->idx == 3;
 }
 
 /*
@@ -205,40 +199,44 @@ journal_res_entry(struct journal *j, struct journal_res *res)
 	return vstruct_idx(j->buf[res->idx].data, res->offset);
 }
 
-static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type,
 					  enum btree_id id, unsigned level,
-					  const void *data, unsigned u64s)
+					  unsigned u64s)
 {
-	memset(entry, 0, sizeof(*entry));
 	entry->u64s	= cpu_to_le16(u64s);
-	entry->type	= type;
 	entry->btree_id = id;
 	entry->level	= level;
-	memcpy_u64s_small(entry->_data, data, u64s);
-
+	entry->type	= type;
+	entry->pad[0]	= 0;
+	entry->pad[1]	= 0;
+	entry->pad[2]	= 0;
 	return jset_u64s(u64s);
 }
 
-static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
-					  unsigned type, enum btree_id id,
-					  unsigned level,
+static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+					  enum btree_id id, unsigned level,
 					  const void *data, unsigned u64s)
 {
-	unsigned actual = journal_entry_set(journal_res_entry(j, res),
-			       type, id, level, data, u64s);
+	unsigned ret = journal_entry_init(entry, type, id, level, u64s);
+
+	memcpy_u64s_small(entry->_data, data, u64s);
+	return ret;
+}
+
+static inline struct jset_entry *
+bch2_journal_add_entry(struct journal *j, struct journal_res *res,
+		       unsigned type, enum btree_id id,
+		       unsigned level, unsigned u64s)
+{
+	struct jset_entry *entry = journal_res_entry(j, res);
+	unsigned actual = journal_entry_init(entry, type, id, level, u64s);
 
 	EBUG_ON(!res->ref);
 	EBUG_ON(actual > res->u64s);
 
 	res->offset	+= actual;
 	res->u64s	-= actual;
-}
-
-static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
-				    enum btree_id id, const struct bkey_i *k)
-{
-	bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
-			       id, 0, k, k->k.u64s);
+	return entry;
 }
 
 static inline bool journal_entry_empty(struct jset *j)
@@ -254,20 +252,42 @@ static inline bool journal_entry_empty(struct jset *j)
 	return true;
 }
 
-void __bch2_journal_buf_put(struct journal *, bool);
-
-static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
-					bool need_write_just_set)
+/*
+ * Drop a reference on buffer @idx and return the new reservation state.
+ */ +static inline union journal_res_state journal_state_buf_put(struct journal *j, unsigned idx) { union journal_res_state s; s.v = atomic64_sub_return(((union journal_res_state) { .buf0_count = idx == 0, .buf1_count = idx == 1, + .buf2_count = idx == 2, + .buf3_count = idx == 3, }).v, &j->reservations.counter); + return s; +} + +void bch2_journal_buf_put_final(struct journal *, u64, bool); + +static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq) +{ + union journal_res_state s; + + s = journal_state_buf_put(j, idx); + if (!journal_state_count(s, idx)) + bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx); +} + +static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq) +{ + union journal_res_state s; + + s = journal_state_buf_put(j, idx); if (!journal_state_count(s, idx)) { - EBUG_ON(s.idx == idx || !s.prev_buf_unwritten); - __bch2_journal_buf_put(j, need_write_just_set); + spin_lock(&j->lock); + bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx); + spin_unlock(&j->lock); } } @@ -286,9 +306,9 @@ static inline void bch2_journal_res_put(struct journal *j, while (res->u64s) bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys, - 0, 0, NULL, 0); + 0, 0, 0); - bch2_journal_buf_put(j, res->idx, false); + bch2_journal_buf_put(j, res->idx, res->seq); res->ref = 0; } @@ -296,10 +316,14 @@ static inline void bch2_journal_res_put(struct journal *j, int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *, unsigned); -#define JOURNAL_RES_GET_NONBLOCK (1 << 0) -#define JOURNAL_RES_GET_CHECK (1 << 1) -#define JOURNAL_RES_GET_RESERVED (1 << 2) -#define JOURNAL_RES_GET_RECLAIM (1 << 3) +/* First bits for BCH_WATERMARK: */ +enum journal_res_flags { + __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS, + __JOURNAL_RES_GET_CHECK, +}; + +#define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK) +#define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK) static inline int journal_res_get_fast(struct journal *j, struct journal_res *res, @@ -320,15 +344,21 @@ static inline int journal_res_get_fast(struct journal *j, EBUG_ON(!journal_state_count(new, new.idx)); - if (!(flags & JOURNAL_RES_GET_RESERVED) && - !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) + if ((flags & BCH_WATERMARK_MASK) < j->watermark) return 0; - if (flags & JOURNAL_RES_GET_CHECK) - return 1; - new.cur_entry_offset += res->u64s; journal_state_inc(&new); + + /* + * If the refcount would overflow, we have to wait: + * XXX - tracepoint this: + */ + if (!journal_state_count(new, new.idx)) + return 0; + + if (flags & JOURNAL_RES_GET_CHECK) + return 1; } while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v); @@ -365,114 +395,18 @@ out: return 0; } -/* journal_preres: */ - -static inline bool journal_check_may_get_unreserved(struct journal *j) -{ - union journal_preres_state s = READ_ONCE(j->prereserved); - bool ret = s.reserved <= s.remaining && - fifo_free(&j->pin) > 8; - - lockdep_assert_held(&j->lock); - - if (ret != test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) { - if (ret) { - set_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags); - journal_wake(j); - } else { - clear_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags); - } - } - return ret; -} - -static inline void bch2_journal_preres_put(struct journal *j, - struct journal_preres *res) -{ - union journal_preres_state s = { .reserved = res->u64s }; - - if (!res->u64s) - return; - - s.v = atomic64_sub_return(s.v, &j->prereserved.counter); - res->u64s = 0; - 
closure_wake_up(&j->preres_wait); - - if (s.reserved <= s.remaining && - !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) { - spin_lock(&j->lock); - journal_check_may_get_unreserved(j); - spin_unlock(&j->lock); - } -} - -int __bch2_journal_preres_get(struct journal *, - struct journal_preres *, unsigned, unsigned); - -static inline int bch2_journal_preres_get_fast(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags) -{ - int d = new_u64s - res->u64s; - union journal_preres_state old, new; - u64 v = atomic64_read(&j->prereserved.counter); - - do { - old.v = new.v = v; - - new.reserved += d; - - /* - * If we're being called from the journal reclaim path, we have - * to unconditionally give out the pre-reservation, there's - * nothing else sensible we can do - otherwise we'd recurse back - * into the reclaim path and deadlock: - */ - - if (!(flags & JOURNAL_RES_GET_RECLAIM) && - new.reserved > new.remaining) - return 0; - } while ((v = atomic64_cmpxchg(&j->prereserved.counter, - old.v, new.v)) != old.v); - - res->u64s += d; - return 1; -} - -static inline int bch2_journal_preres_get(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags) -{ - if (new_u64s <= res->u64s) - return 0; - - if (bch2_journal_preres_get_fast(j, res, new_u64s, flags)) - return 0; - - if (flags & JOURNAL_RES_GET_NONBLOCK) - return -EAGAIN; - - return __bch2_journal_preres_get(j, res, new_u64s, flags); -} - /* journal_entry_res: */ void bch2_journal_entry_res_resize(struct journal *, struct journal_entry_res *, unsigned); -u64 bch2_journal_last_unwritten_seq(struct journal *); -int bch2_journal_open_seq_async(struct journal *, u64, struct closure *); - -void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *); -void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *); +int bch2_journal_flush_seq_async(struct journal *, u64, struct closure *); void bch2_journal_flush_async(struct journal *, struct closure *); -void bch2_journal_meta_async(struct journal *, struct closure *); int bch2_journal_flush_seq(struct journal *, u64); int bch2_journal_flush(struct journal *); +bool bch2_journal_noflush_seq(struct journal *, u64); int bch2_journal_meta(struct journal *); void bch2_journal_halt(struct journal *); @@ -485,11 +419,6 @@ static inline int bch2_journal_error(struct journal *j) struct bch_dev; -static inline bool journal_flushes_device(struct bch_dev *ca) -{ - return true; -} - static inline void bch2_journal_set_replay_done(struct journal *j) { BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags)); @@ -499,17 +428,20 @@ static inline void bch2_journal_set_replay_done(struct journal *j) void bch2_journal_unblock(struct journal *); void bch2_journal_block(struct journal *); -ssize_t bch2_journal_print_debug(struct journal *, char *); -ssize_t bch2_journal_print_pins(struct journal *, char *); +void __bch2_journal_debug_to_text(struct printbuf *, struct journal *); +void bch2_journal_debug_to_text(struct printbuf *, struct journal *); +void bch2_journal_pins_to_text(struct printbuf *, struct journal *); +bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *); int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *, unsigned nr); int bch2_dev_journal_alloc(struct bch_dev *); +int bch2_fs_journal_alloc(struct bch_fs *); void bch2_dev_journal_stop(struct journal *, struct bch_dev *); void bch2_fs_journal_stop(struct journal *); -int bch2_fs_journal_start(struct journal *, u64, struct list_head *); +int 
bch2_fs_journal_start(struct journal *, u64); void bch2_dev_journal_exit(struct bch_dev *); int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
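
The patch adds journal_last_unwritten_seq() next to journal_cur_seq(), so three sequence numbers can be read straight off the journal: the oldest dirty entry (front of the j->pin FIFO), the entry currently being filled (back of the FIFO, minus one), and the first entry not yet fully on disk (j->seq_ondisk + 1). A toy model of that bookkeeping follows; struct jmodel and its field names are hypothetical stand-ins for struct journal, and mapping journal_last_seq() to the FIFO front is an assumption, since that function's body is outside this hunk.

/*
 * Toy model of the journal's sequence-number bookkeeping (hypothetical
 * struct; cf. struct journal). pin is a FIFO of dirty entries, and
 * seq_ondisk trails the entry currently being filled.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct jmodel {
	uint64_t pin_front;	/* oldest entry not yet reclaimed */
	uint64_t pin_back;	/* one past the entry currently open */
	uint64_t seq_ondisk;	/* most recent entry fully written */
};

/* Oldest dirty entry (assumed mapping; body not in this hunk): */
static uint64_t last_seq(const struct jmodel *j)	{ return j->pin_front; }

/* Entry currently open for writing, cf. journal_cur_seq(): */
static uint64_t cur_seq(const struct jmodel *j)		{ return j->pin_back - 1; }

/* First entry not yet on disk, cf. journal_last_unwritten_seq(): */
static uint64_t last_unwritten_seq(const struct jmodel *j) { return j->seq_ondisk + 1; }

int main(void)
{
	/* Entries 7..9 are dirty; 7 and 8 made it to disk; 9 is open: */
	struct jmodel j = { .pin_front = 7, .pin_back = 10, .seq_ondisk = 8 };

	assert(last_seq(&j) <= cur_seq(&j));
	assert(last_unwritten_seq(&j) <= cur_seq(&j) + 1);

	printf("oldest dirty %llu, current %llu, first unwritten %llu\n",
	       (unsigned long long) last_seq(&j),
	       (unsigned long long) cur_seq(&j),
	       (unsigned long long) last_unwritten_seq(&j));
	return 0;
}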
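
The core of the patch is the move from two journal buffers to four: journal_state_count() becomes a switch over four 10-bit refcounts, journal_state_inc() bumps each count conditionally, and journal_state_buf_put() drops a reference with a single 64-bit atomic subtract of a pre-shifted delta. Below is a standalone user-space sketch of that state machine, assuming C11 atomics and GCC/Clang-style 64-bit bitfields; union res_state and the helper names are hypothetical, and the field widths only illustrate the layout in journal_types.h.

/*
 * Standalone model of the packed reservation state (hypothetical names;
 * cf. union journal_res_state). Relies on union type-punning and 64-bit
 * bitfields, both fine under GCC/Clang, as in the kernel.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union res_state {
	uint64_t v;
	struct {
		uint64_t cur_entry_offset:20;	/* u64s used in the open entry */
		uint64_t idx:2;			/* buffer currently being filled */
		uint64_t unwritten_idx:2;	/* oldest buffer not yet on disk */
		uint64_t buf0_count:10;		/* outstanding refs per buffer */
		uint64_t buf1_count:10;
		uint64_t buf2_count:10;
		uint64_t buf3_count:10;
	};
};

static unsigned state_count(union res_state s, unsigned idx)
{
	switch (idx) {
	case 0: return s.buf0_count;
	case 1: return s.buf1_count;
	case 2: return s.buf2_count;
	case 3: return s.buf3_count;
	}
	assert(0);
	return 0;
}

/* Take a reference on whichever buffer is currently open: */
static void state_inc(union res_state *s)
{
	s->buf0_count += s->idx == 0;
	s->buf1_count += s->idx == 1;
	s->buf2_count += s->idx == 2;
	s->buf3_count += s->idx == 3;
}

/*
 * Drop a reference on buffer @idx with one atomic subtract of a pre-shifted
 * delta and return the new state. The subtract cannot borrow into a
 * neighbouring field as long as the reference being dropped actually
 * exists, which is what makes the lock-free put safe. atomic_fetch_sub()
 * returns the old value, hence the extra subtraction:
 */
static union res_state state_buf_put(_Atomic uint64_t *counter, unsigned idx)
{
	union res_state delta = { .v = 0 }, s;

	delta.buf0_count = idx == 0;
	delta.buf1_count = idx == 1;
	delta.buf2_count = idx == 2;
	delta.buf3_count = idx == 3;

	s.v = atomic_fetch_sub(counter, delta.v) - delta.v;
	return s;
}

int main(void)
{
	_Atomic uint64_t counter;
	union res_state s = { .v = 0 };

	s.idx = 2;
	state_inc(&s);			/* one reservation against buf 2 */
	atomic_store(&counter, s.v);

	s = state_buf_put(&counter, 2);
	printf("buf2 refcount after put: %u\n", state_count(s, 2));
	return 0;
}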
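
journal_entry_init() replaces the old memset() plus journal_entry_set(): every header field, the three pad bytes included, is stored explicitly, and the payload copy moves into the new journal_entry_set() wrapper. The sketch below reproduces that header packing standalone; entry_hdr mirrors the on-disk field order of struct jset_entry (u64s, btree_id, level, type, pad[3]), but the helper names and the omitted little-endian conversion are simplifications.

/* Sketch of the jset_entry header init (illustrative layout and names). */
#include <stdint.h>
#include <stdio.h>

struct entry_hdr {
	uint16_t u64s;		/* payload size in 8-byte words */
	uint8_t  btree_id;
	uint8_t  level;
	uint8_t  type;
	uint8_t  pad[3];	/* must be zeroed: this header hits the disk */
	uint64_t data[];
};

/* Total size of the entry in u64s, header included, cf. jset_u64s(): */
static unsigned jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct entry_hdr) / sizeof(uint64_t);
}

static unsigned entry_init(struct entry_hdr *e, unsigned type, unsigned id,
			   unsigned level, unsigned u64s)
{
	/* Storing every field, pads included, replaces the old memset()
	 * of the whole header and leaves no uninitialized bytes: */
	e->u64s	    = (uint16_t) u64s;	/* kernel: cpu_to_le16(u64s) */
	e->btree_id = id;
	e->level    = level;
	e->type	    = type;
	e->pad[0] = e->pad[1] = e->pad[2] = 0;
	return jset_u64s(u64s);
}

int main(void)
{
	uint64_t buf[4];
	unsigned total = entry_init((struct entry_hdr *) buf, 1, 0, 0, 3);

	printf("entry occupies %u u64s (1 header + 3 payload)\n", total);
	return 0;
}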
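
journal_res_get_fast() turns a reservation into a single compare-exchange over that packed word: claim space in the open entry, take a buffer reference, retry if another CPU moved the state underneath. The loop below reuses the res_state model from the sketch above, so it is a sketch rather than the bcachefs function: entry_capacity stands in for the journal's current entry size, the full-entry return stands in for the slowpath, and the !state_count() bail-out mirrors the new refcount-overflow check in this hunk (the watermark test is shown in the next sketch).

/*
 * Lock-free reservation fast path over the res_state model above
 * (hypothetical helper; the real journal_res_get_fast() also checks the
 * watermark and a closed-entry sentinel). Returns 1 on success, 0 if the
 * caller must take the slowpath:
 */
static int res_get_fast(_Atomic uint64_t *counter, unsigned u64s,
			unsigned entry_capacity)
{
	union res_state old, new;
	uint64_t v = atomic_load(counter);

	do {
		old.v = new.v = v;

		/* Not enough room in the open entry: the slowpath will
		 * close it and open the next buffer. */
		if (old.cur_entry_offset + u64s > entry_capacity)
			return 0;

		/* Claim the space and take a ref on the open buffer: */
		new.cur_entry_offset += u64s;
		state_inc(&new);

		/* The 10-bit refcount would wrap to zero: too many
		 * concurrent reservations, wait for a write to finish. */
		if (!state_count(new, new.idx))
			return 0;

		/* On failure, v is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(counter, &v, new.v));

	return 1;
}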
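
With JOURNAL_RES_GET_RESERVED and JOURNAL_RES_GET_RECLAIM gone, callers now encode a BCH_WATERMARK in the low bits of the flags word, and the remaining flag bits start at BCH_WATERMARK_BITS. The toy below shows the packing and the admission test that replaces the old JOURNAL_MAY_GET_UNRESERVED bit; the watermark names and widths are placeholders (the real levels are the BCH_WATERMARK_* values in alloc_types.h), and may_reserve() is simply the inverse of the patch's bail-out "(flags & BCH_WATERMARK_MASK) < j->watermark".

/* Toy packing of a watermark into the low bits of the reservation flags
 * (placeholder names and widths; cf. BCH_WATERMARK_BITS / _MASK). */
#include <stdbool.h>
#include <stdio.h>

#define WATERMARK_BITS		2
#define WATERMARK_MASK		((1U << WATERMARK_BITS) - 1)

enum watermark {		/* higher = may dig deeper into reserves */
	WATERMARK_NORMAL	= 0,
	WATERMARK_RECLAIM	= 1,
};

#define RES_GET_NONBLOCK	(1U << WATERMARK_BITS)	     /* don't block on a full journal */
#define RES_GET_CHECK		(1U << (WATERMARK_BITS + 1)) /* dry run, don't commit */

static bool may_reserve(unsigned flags, enum watermark journal_watermark)
{
	/*
	 * The journal raises its watermark as it runs out of space; only
	 * requests at or above the current watermark may proceed:
	 */
	return (flags & WATERMARK_MASK) >= journal_watermark;
}

int main(void)
{
	unsigned normal  = WATERMARK_NORMAL  | RES_GET_NONBLOCK;
	unsigned reclaim = WATERMARK_RECLAIM | RES_GET_NONBLOCK;

	/* Under pressure (watermark raised to reclaim), ordinary writers
	 * are refused while journal reclaim can still make progress: */
	printf("normal: %d, reclaim: %d\n",
	       may_reserve(normal,  WATERMARK_RECLAIM),
	       may_reserve(reclaim, WATERMARK_RECLAIM));
	return 0;
}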