#ifndef _BCACHEFS_JOURNAL_H
#define _BCACHEFS_JOURNAL_H

/*
 * THE JOURNAL:
 *
 * The primary purpose of the journal is to log updates (insertions) to the
 * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
 *
 * Without the journal, the b-tree is always internally consistent on
 * disk - and in fact, in the earliest incarnations bcache didn't have a journal
 * but did handle unclean shutdowns by doing all index updates synchronously
 * (with coalescing).
 *
 * Updates to interior nodes still happen synchronously and without the journal
 * (for simplicity) - this may change eventually but updates to interior nodes
 * are rare enough that it's not a huge priority.
 *
 * This means the journal is relatively separate from the b-tree; it consists of
 * just a list of keys and journal replay consists of just redoing those
 * insertions in the same order that they appear in the journal.
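 *
 * Conceptually (pseudocode only, not actual code from this file), replay is
 * just:
 *
 *	for each journal entry, in increasing jset->seq order:
 *		for each jset_entry holding keys:
 *			reinsert those keys into their b-tree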
 *
 * PERSISTENCE:
 *
 * For synchronous updates (where we're waiting on the index update to hit
 * disk), the journal entry will be written out immediately (or as soon as
 * possible, if the write for the previous journal entry was still in flight).
 *
 * Synchronous updates are specified by passing a closure (@flush_cl) to
 * bch2_btree_insert() or bch2_btree_insert_node(), which then pass that parameter
 * down to the journalling code. That closure will wait on the journal
 * write to complete (via closure_wait()).
 *
 * If the index update wasn't synchronous, the journal entry will be
 * written out after 10 ms have elapsed, by default (the delay_ms field
 * in struct journal).
 *
 * JOURNAL ENTRIES:
 *
 * A journal entry is variable size (struct jset); it has a fixed length
 * header and then a variable number of struct jset_entry entries.
 *
 * Journal entries are identified by monotonically increasing 64 bit sequence
 * numbers - jset->seq; other places in the code refer to this sequence number.
 *
 * A jset_entry entry contains one or more bkeys (which is what gets inserted
 * into the b-tree). We need a container to indicate which b-tree the key is
 * for; also, the roots of the various b-trees are stored in jset_entry entries
 * (one for each b-tree) - this lets us add new b-tree types without changing
 * the on disk format.
 *
 * We also keep some things in the journal header that are logically part of the
 * superblock - all the things that are frequently updated. This is for future
 * bcache on raw flash support; the superblock (which will become another
 * journal) can't be moved or wear leveled, so it contains just enough
 * information to find the main journal, and the superblock only has to be
 * rewritten when we want to move/wear level the main journal.
 *
 * JOURNAL LAYOUT ON DISK:
 *
 * The journal is written to a ringbuffer of buckets (which is kept in the
 * superblock); the individual buckets are not necessarily contiguous on disk
 * which means that journal entries are not allowed to span buckets, but also
 * that we can resize the journal at runtime if desired (unimplemented).
 *
 * The journal buckets exist in the same pool as all the other buckets that are
 * managed by the allocator and garbage collection - garbage collection marks
 * the journal buckets as metadata buckets.
 *
 * OPEN/DIRTY JOURNAL ENTRIES:
 *
 * Open/dirty journal entries are journal entries that contain b-tree updates
 * that have not yet been written out to the b-tree on disk. We have to track
 * which journal entries are dirty, and we also have to avoid wrapping around
 * the journal and overwriting old but still dirty journal entries with new
 * journal entries.
 *
 * On disk, this is represented with the "last_seq" field of struct jset;
 * last_seq is the first sequence number that journal replay has to replay.
 *
 * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
 * journal_device->seq) from each journal bucket to the highest sequence number
 * of any journal entry it contains. Then, by comparing that against last_seq we
 * can determine whether that journal bucket contains dirty journal entries or
 * not.
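 *
 * For example (illustrative numbers): if last_seq is 100, a bucket whose
 * highest sequence number is 95 contains nothing that replay still needs and
 * may be reused, while a bucket whose highest sequence number is 105 still
 * contains dirty journal entries and must not be overwritten.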
 *
 * To track which journal entries are dirty, we maintain a fifo of refcounts
 * (where each entry corresponds to a specific sequence number) - when a ref
 * goes to 0, that journal entry is no longer dirty.
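 *
 * As a rough sketch (conceptual pseudocode, not the actual API - the fifo
 * itself lives in j->pin, indexed by sequence number):
 *
 *	journal a key:		refs[seq]++	(the b-tree write pins seq)
 *	btree node written:	refs[seq]--	(seq may now be clean)
 *	journal_last_seq():	oldest seq with refs[seq] != 0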
 *
 * Journalling of index updates is done at the same time as the b-tree itself is
 * being modified (see btree_insert_key()); when we add the key to the journal
 * the pending b-tree write takes a ref on the journal entry the key was added
 * to. If a pending b-tree write would need to take refs on multiple dirty
 * journal entries, it only keeps the ref on the oldest one (since a newer
 * journal entry will still be replayed if an older entry was dirty).
 *
 * JOURNAL FILLING UP:
 *
 * There are two ways the journal could fill up; either we could run out of
 * space to write to, or we could have too many open journal entries and run out
 * of room in the fifo of refcounts. Since those refcounts are decremented
 * without any locking we can't safely resize that fifo, so we handle it the
 * same way.
 *
 * If the journal fills up, we start flushing dirty btree nodes until we can
 * allocate space for a journal write again - preferentially flushing btree
 * nodes that are pinning the oldest journal entries first.
 */

#include <linux/hash.h>

#include "journal_types.h"

static inline void journal_wake(struct journal *j)
{
	closure_wake_up(&j->async_wait);
	closure_wake_up(&j->preres_wait);
}

static inline struct journal_buf *journal_cur_buf(struct journal *j)
{
	return j->buf + j->reservations.idx;
}

static inline struct journal_buf *journal_prev_buf(struct journal *j)
{
	return j->buf + !j->reservations.idx;
}

/* Sequence number of oldest dirty journal entry */

static inline u64 journal_last_seq(struct journal *j)
{
	return j->pin.front;
}

static inline u64 journal_cur_seq(struct journal *j)
{
	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	return j->pin.back - 1;
}

u64 bch2_inode_journal_seq(struct journal *, u64);

static inline int journal_state_count(union journal_res_state s, int idx)
{
	return idx == 0 ? s.buf0_count : s.buf1_count;
}

static inline void journal_state_inc(union journal_res_state *s)
{
	s->buf0_count += s->idx == 0;
	s->buf1_count += s->idx == 1;
}

static inline void bch2_journal_set_has_inode(struct journal *j,
					      struct journal_res *res,
					      u64 inum)
{
	struct journal_buf *buf = &j->buf[res->idx];
	unsigned long bit = hash_64(inum, ilog2(sizeof(buf->has_inode) * 8));

	/* avoid atomic op if possible */
	if (unlikely(!test_bit(bit, buf->has_inode)))
		set_bit(bit, buf->has_inode);
}
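
/*
 * Note: has_inode is a conservative filter - a set bit only means the buffer
 * *may* contain updates for an inode whose number hashes to that bit, so hash
 * collisions can cause false positives but never false negatives.
 */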

/*
 * Amount of space that will be taken up by some keys in the journal (i.e.
 * including the jset_entry header)
 */
static inline unsigned jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct jset_entry) / sizeof(u64);
}
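
/*
 * For example, assuming the usual 8 byte (one u64) jset_entry header:
 * jset_u64s(2) == 3 - two u64s of key material plus one u64 of header.
 */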

static inline int journal_entry_overhead(struct journal *j)
{
	return sizeof(struct jset) / sizeof(u64) + j->entry_u64s_reserved;
}

static inline struct jset_entry *
bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
{
	struct jset *jset = buf->data;
	struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));

	memset(entry, 0, sizeof(*entry));
	entry->u64s = cpu_to_le16(u64s);

	le32_add_cpu(&jset->u64s, jset_u64s(u64s));

	return entry;
}

static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
					  unsigned type, enum btree_id id,
					  unsigned level,
					  const void *data, unsigned u64s)
{
	struct journal_buf *buf = &j->buf[res->idx];
	struct jset_entry *entry = vstruct_idx(buf->data, res->offset);
	unsigned actual = jset_u64s(u64s);

	EBUG_ON(actual > res->u64s);

	res->offset += actual;
	res->u64s -= actual;

	memset(entry, 0, sizeof(*entry));
	entry->u64s = cpu_to_le16(u64s);
	entry->type = type;
	entry->btree_id = id;
	entry->level = level;
	memcpy_u64s(entry->_data, data, u64s);
}

static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
					 enum btree_id id, const struct bkey_i *k)
{
	bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
			       id, 0, k, k->k.u64s);
}

void __bch2_journal_buf_put(struct journal *, bool);

static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
					bool need_write_just_set)
{
	union journal_res_state s;

	s.v = atomic64_sub_return(((union journal_res_state) {
		.buf0_count = idx == 0,
		.buf1_count = idx == 1,
	}).v, &j->reservations.counter);

	if (!journal_state_count(s, idx)) {
		EBUG_ON(s.idx == idx || !s.prev_buf_unwritten);
		__bch2_journal_buf_put(j, need_write_just_set);
	}
}

/*
 * This function releases the journal write structure so other threads can
 * then proceed to add their keys as well.
 */
static inline void bch2_journal_res_put(struct journal *j,
					struct journal_res *res)
{
	lock_release(&j->res_map, 0, _RET_IP_);

	/* fill any remaining space in the reservation with empty entries: */
	while (res->u64s)
		bch2_journal_add_entry(j, res,
				       BCH_JSET_ENTRY_btree_keys,
				       0, 0, NULL, 0);

	bch2_journal_buf_put(j, res->idx, false);
}

int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
				  unsigned);

#define JOURNAL_RES_GET_NONBLOCK	(1 << 0)
#define JOURNAL_RES_GET_CHECK		(1 << 1)
#define JOURNAL_RES_GET_RESERVED	(1 << 2)

static inline int journal_res_get_fast(struct journal *j,
				       struct journal_res *res,
				       unsigned flags)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;

		/*
		 * Check if there is still room in the current journal
		 * entry:
		 */
		if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
			return 0;

		EBUG_ON(!journal_state_count(new, new.idx));

		if (!(flags & JOURNAL_RES_GET_RESERVED) &&
		    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags))
			return 0;

		if (flags & JOURNAL_RES_GET_CHECK)
			return 1;

		new.cur_entry_offset += res->u64s;
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	res->idx = old.idx;
	res->offset = old.cur_entry_offset;
	res->seq = le64_to_cpu(j->buf[old.idx].data->seq);
	return 1;
}
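
/*
 * Note on the loop above: the whole reservation state - buffer index, entry
 * offset and per-buffer refcounts - is packed into one 64 bit word, so a
 * single cmpxchg both claims space in the entry and takes a ref on the
 * journal buffer, keeping the fast path lock-free.
 */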

static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
				       unsigned u64s, unsigned flags)
{
	int ret;

	EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
	res->u64s = u64s;

	if (journal_res_get_fast(j, res, flags))
		goto out;

	ret = bch2_journal_res_get_slowpath(j, res, flags);
	if (ret)
		return ret;
out:
	if (!(flags & JOURNAL_RES_GET_CHECK))
		lock_acquire_shared(&j->res_map, 0, 0, NULL, _THIS_IP_);
	return 0;
}
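
/*
 * A minimal sketch of the reservation lifecycle, for illustration only
 * (error handling trimmed; @j, @id and the key @k come from the caller):
 *
 *	struct journal_res res = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	bch2_journal_add_keys(j, &res, id, k);
 *	bch2_journal_res_put(j, &res);
 */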

/* journal_preres: */

static inline bool journal_check_may_get_unreserved(struct journal *j)
{
	union journal_preres_state s = READ_ONCE(j->prereserved);
	bool ret = s.reserved <= s.remaining &&
		fifo_free(&j->pin) > 8;

	lockdep_assert_held(&j->lock);

	if (ret != test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		if (ret)
			set_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
		else
			clear_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
	}

	return ret;
}

static inline void bch2_journal_preres_put(struct journal *j,
					   struct journal_preres *res)
{
	union journal_preres_state s = { .reserved = res->u64s };

	res->u64s = 0;
	s.v = atomic64_sub_return(s.v, &j->prereserved.counter);

	closure_wake_up(&j->preres_wait);

	if (s.reserved <= s.remaining &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		spin_lock(&j->lock);
		journal_check_may_get_unreserved(j);
		spin_unlock(&j->lock);
	}
}

int __bch2_journal_preres_get(struct journal *,
			      struct journal_preres *, unsigned);

static inline int bch2_journal_preres_get_fast(struct journal *j,
					       struct journal_preres *res,
					       unsigned new_u64s)
{
	int d = new_u64s - res->u64s;
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;
		new.reserved += d;
		if (new.reserved > new.remaining)
			return 0;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);

	res->u64s += d;
	return 1;
}

static inline int bch2_journal_preres_get(struct journal *j,
					  struct journal_preres *res,
					  unsigned new_u64s, unsigned flags)
{
	if (new_u64s <= res->u64s)
		return 0;

	if (bch2_journal_preres_get_fast(j, res, new_u64s))
		return 0;

	if (flags & JOURNAL_RES_GET_NONBLOCK)
		return -EAGAIN;

	return __bch2_journal_preres_get(j, res, new_u64s);
}
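
/*
 * One plausible pairing, for illustration only (@j and the size are the
 * caller's; a prereservation is later spent via JOURNAL_RES_GET_RESERVED):
 *
 *	struct journal_preres preres = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_preres_get(j, &preres, u64s, 0);
 *	if (ret)
 *		return ret;
 *	...later, take the real reservation and do the update...
 *	bch2_journal_preres_put(j, &preres);
 */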

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *,
				   struct journal_entry_res *,
				   unsigned);

u64 bch2_journal_last_unwritten_seq(struct journal *);
int bch2_journal_open_seq_async(struct journal *, u64, struct closure *);

void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *);
void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
void bch2_journal_flush_async(struct journal *, struct closure *);
void bch2_journal_meta_async(struct journal *, struct closure *);

int bch2_journal_flush_seq(struct journal *, u64);
int bch2_journal_flush(struct journal *);
int bch2_journal_meta(struct journal *);

void bch2_journal_halt(struct journal *);

static inline int bch2_journal_error(struct journal *j)
{
	return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
		? -EIO : 0;
}

static inline bool journal_flushes_device(struct bch_dev *ca)
{
	return true;
}

static inline void bch2_journal_set_replay_done(struct journal *j)
{
	BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
	set_bit(JOURNAL_REPLAY_DONE, &j->flags);
}

void bch2_journal_unblock(struct journal *);
void bch2_journal_block(struct journal *);

ssize_t bch2_journal_print_debug(struct journal *, char *);
ssize_t bch2_journal_print_pins(struct journal *, char *);

int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
				unsigned nr);
int bch2_dev_journal_alloc(struct bch_dev *);

void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
void bch2_fs_journal_stop(struct journal *);
void bch2_fs_journal_start(struct journal *);
void bch2_dev_journal_exit(struct bch_dev *);
int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
void bch2_fs_journal_exit(struct journal *);
int bch2_fs_journal_init(struct journal *);

#endif /* _BCACHEFS_JOURNAL_H */