/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_JOURNAL_H
#define _BCACHEFS_JOURNAL_H

/*
 * THE JOURNAL:
 *
 * The primary purpose of the journal is to log updates (insertions) to the
 * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
 *
 * Without the journal, the b-tree is always internally consistent on
 * disk - and in fact, in the earliest incarnations bcache didn't have a journal
 * but did handle unclean shutdowns by doing all index updates synchronously
 * (with cache flushes).
 *
 * Updates to interior nodes still happen synchronously and without the journal
 * (for simplicity) - this may change eventually but updates to interior nodes
 * are rare enough it's not a huge priority.
 *
 * This means the journal is relatively separate from the b-tree; it consists of
 * just a list of keys, and journal replay consists of just redoing those
 * insertions in the same order that they appear in the journal.
 *
 * PERSISTENCE:
 *
 * For synchronous updates (where we're waiting on the index update to hit
 * disk), the journal entry will be written out immediately (or as soon as
 * possible, if the write for the previous journal entry was still in flight).
 *
 * Synchronous updates are specified by passing a closure (@flush_cl) to
 * bch2_btree_insert() or bch2_btree_insert_node(), which then pass that
 * parameter down to the journalling code. That closure will wait on the
 * journal write to complete (via closure_wait()).
 *
 * If the index update wasn't synchronous, the journal entry will be
 * written out after 10 ms have elapsed, by default (the delay_ms field
 * in struct journal).
 *
 * JOURNAL ENTRIES:
 *
 * A journal entry is variable size (struct jset); it's got a fixed length
 * header and then a variable number of struct jset_entry entries.
 *
 * Journal entries are identified by monotonically increasing 64 bit sequence
 * numbers - jset->seq; other places in the code refer to this sequence number.
 *
 * A jset_entry entry contains one or more bkeys (which is what gets inserted
 * into the b-tree). We need a container to indicate which b-tree the key is
 * for; also, the roots of the various b-trees are stored in jset_entry entries
 * (one for each b-tree) - this lets us add new b-tree types without changing
 * the on disk format.
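 *
 * For example, a journal entry carrying keys for two b-trees looks roughly
 * like this on disk (a sketch of the structure, not an exact byte layout):
 *
 *	struct jset         (header: seq, last_seq, u64s, ...)
 *	  struct jset_entry (u64s, btree_id, level, type) followed by bkeys
 *	  struct jset_entry (u64s, btree_id, level, type) followed by bkeys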
 *
 * We also keep some things in the journal header that are logically part of the
 * superblock - all the things that are frequently updated. This is for future
 * bcache on raw flash support; the superblock (which will become another
 * journal) can't be moved or wear leveled, so it contains just enough
 * information to find the main journal, and the superblock only has to be
 * rewritten when we want to move/wear level the main journal.
 *
 * JOURNAL LAYOUT ON DISK:
 *
 * The journal is written to a ringbuffer of buckets (which is kept in the
 * superblock); the individual buckets are not necessarily contiguous on disk
 * which means that journal entries are not allowed to span buckets, but also
 * that we can resize the journal at runtime if desired (unimplemented).
 *
 * The journal buckets exist in the same pool as all the other buckets that are
 * managed by the allocator and garbage collection - garbage collection marks
 * the journal buckets as metadata buckets.
 *
 * OPEN/DIRTY JOURNAL ENTRIES:
 *
 * Open/dirty journal entries are journal entries that contain b-tree updates
 * that have not yet been written out to the b-tree on disk. We have to track
 * which journal entries are dirty, and we also have to avoid wrapping around
 * the journal and overwriting old but still dirty journal entries with new
 * journal entries.
 *
 * On disk, this is represented with the "last_seq" field of struct jset;
 * last_seq is the first sequence number that journal replay has to replay.
 *
 * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
 * journal_device->seq) from each journal bucket to the highest sequence number
 * of any journal entry it contains. Then, by comparing that against last_seq,
 * we can determine whether that journal bucket contains dirty journal entries
 * or not.
 *
 * To track which journal entries are dirty, we maintain a fifo of refcounts
 * (where each entry corresponds to a specific sequence number) - when a ref
 * goes to 0, that journal entry is no longer dirty.
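 *
 * (A worked example: if entries 10 through 13 are dirty, the fifo holds four
 * refcounts, its front corresponds to sequence number 10, and the last_seq
 * written to disk is 10. Once the last btree write pinning entry 10 drops its
 * ref, the front of the fifo can advance and the next journal write records a
 * newer last_seq.)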
 *
 * Journalling of index updates is done at the same time as the b-tree itself is
 * being modified (see btree_insert_key()); when we add the key to the journal
 * the pending b-tree write takes a ref on the journal entry the key was added
 * to. If a pending b-tree write would need to take refs on multiple dirty
 * journal entries, it only keeps the ref on the oldest one (since a newer
 * journal entry will still be replayed if an older entry was dirty).
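 *
 * (E.g. a btree node write covering keys journalled in entries 5 and 7 only
 * holds a ref on entry 5: as long as entry 5 is dirty, last_seq <= 5, so
 * replay will redo entry 7's keys anyway.)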
 *
 * JOURNAL FILLING UP:
 *
 * There are two ways the journal could fill up; either we could run out of
 * space to write to, or we could have too many open journal entries and run out
 * of room in the fifo of refcounts. Since those refcounts are decremented
 * without any locking we can't safely resize that fifo, so we handle it the
 * same way.
 *
 * If the journal fills up, we start flushing dirty btree nodes until we can
 * allocate space for a journal write again - preferentially flushing btree
 * nodes that are pinning the oldest journal entries first.
 */

#include <linux/hash.h>

#include "journal_types.h"

struct bch_fs;

static inline void journal_wake(struct journal *j)
{
	wake_up(&j->wait);
	closure_wake_up(&j->async_wait);
	closure_wake_up(&j->preres_wait);
}

static inline struct journal_buf *journal_cur_buf(struct journal *j)
{
	return j->buf + j->reservations.idx;
}

static inline struct journal_buf *journal_prev_buf(struct journal *j)
{
	return j->buf + !j->reservations.idx;
}

/* Sequence number of oldest dirty journal entry */

static inline u64 journal_last_seq(struct journal *j)
{
	return j->pin.front;
}

static inline u64 journal_cur_seq(struct journal *j)
{
	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	return j->pin.back - 1;
}

u64 bch2_inode_journal_seq(struct journal *, u64);

static inline int journal_state_count(union journal_res_state s, int idx)
{
	return idx == 0 ? s.buf0_count : s.buf1_count;
}

static inline void journal_state_inc(union journal_res_state *s)
{
	s->buf0_count += s->idx == 0;
	s->buf1_count += s->idx == 1;
}
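
/*
 * Note: both buffer refcounts are packed into the same union
 * journal_res_state word as the current buffer index and entry offset, so
 * journal_res_get_fast() below can take a ref, pick the buffer and bump the
 * offset with a single 64 bit cmpxchg.
 */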

static inline void bch2_journal_set_has_inode(struct journal *j,
					      struct journal_res *res,
					      u64 inum)
{
	struct journal_buf *buf = &j->buf[res->idx];
	unsigned long bit = hash_64(inum, ilog2(sizeof(buf->has_inode) * 8));

	/* avoid atomic op if possible */
	if (unlikely(!test_bit(bit, buf->has_inode)))
		set_bit(bit, buf->has_inode);
}

/*
 * Amount of space that will be taken up by some keys in the journal (i.e.
 * including the jset header)
 */
static inline unsigned jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct jset_entry) / sizeof(u64);
}
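
/*
 * E.g. a bkey spanning 3 u64s costs jset_u64s(3) ==
 * 3 + sizeof(struct jset_entry) / sizeof(u64) u64s of journal space: the key
 * itself plus one jset_entry header's worth of overhead.
 */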

static inline int journal_entry_overhead(struct journal *j)
{
	return sizeof(struct jset) / sizeof(u64) + j->entry_u64s_reserved;
}

static inline struct jset_entry *
bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
{
	struct jset *jset = buf->data;
	struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));

	memset(entry, 0, sizeof(*entry));
	entry->u64s = cpu_to_le16(u64s);

	le32_add_cpu(&jset->u64s, jset_u64s(u64s));

	return entry;
}

static inline struct jset_entry *
journal_res_entry(struct journal *j, struct journal_res *res)
{
	return vstruct_idx(j->buf[res->idx].data, res->offset);
}

static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
					 enum btree_id id, unsigned level,
					 const void *data, unsigned u64s)
{
	memset(entry, 0, sizeof(*entry));
	entry->u64s	= cpu_to_le16(u64s);
	entry->type	= type;
	entry->btree_id	= id;
	entry->level	= level;
	memcpy_u64s_small(entry->_data, data, u64s);

	return jset_u64s(u64s);
}

static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
					  unsigned type, enum btree_id id,
					  unsigned level,
					  const void *data, unsigned u64s)
{
	unsigned actual = journal_entry_set(journal_res_entry(j, res),
					    type, id, level, data, u64s);

	EBUG_ON(!res->ref);
	EBUG_ON(actual > res->u64s);

	res->offset	+= actual;
	res->u64s	-= actual;
}

static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
					 enum btree_id id, const struct bkey_i *k)
{
	bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
			       id, 0, k, k->k.u64s);
}

static inline bool journal_entry_empty(struct jset *j)
{
	struct jset_entry *i;

	if (j->seq != j->last_seq)
		return false;

	vstruct_for_each(j, i)
		if (i->type == BCH_JSET_ENTRY_btree_keys && i->u64s)
			return false;
	return true;
}

void __bch2_journal_buf_put(struct journal *, bool);

static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
					bool need_write_just_set)
{
	union journal_res_state s;

	s.v = atomic64_sub_return(((union journal_res_state) {
				    .buf0_count = idx == 0,
				    .buf1_count = idx == 1,
				    }).v, &j->reservations.counter);
	if (!journal_state_count(s, idx)) {
		EBUG_ON(s.idx == idx || !s.prev_buf_unwritten);
		__bch2_journal_buf_put(j, need_write_just_set);
	}
}

/*
 * This function releases the journal write structure so other threads can
 * then proceed to add their keys as well.
 */
static inline void bch2_journal_res_put(struct journal *j,
					struct journal_res *res)
{
	if (!res->ref)
		return;

	lock_release(&j->res_map, _THIS_IP_);

	while (res->u64s)
		bch2_journal_add_entry(j, res,
				       BCH_JSET_ENTRY_btree_keys,
				       0, 0, NULL, 0);

	bch2_journal_buf_put(j, res->idx, false);

	res->ref = 0;
}

int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
				  unsigned);

#define JOURNAL_RES_GET_NONBLOCK	(1 << 0)
#define JOURNAL_RES_GET_CHECK		(1 << 1)
#define JOURNAL_RES_GET_RESERVED	(1 << 2)
#define JOURNAL_RES_GET_RECLAIM		(1 << 3)

static inline int journal_res_get_fast(struct journal *j,
				       struct journal_res *res,
				       unsigned flags)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;

		/*
		 * Check if there is still room in the current journal
		 * entry:
		 */
		if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
			return 0;

		EBUG_ON(!journal_state_count(new, new.idx));

		if (!(flags & JOURNAL_RES_GET_RESERVED) &&
		    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags))
			return 0;

		if (flags & JOURNAL_RES_GET_CHECK)
			return 1;

		new.cur_entry_offset += res->u64s;
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	res->ref	= true;
	res->idx	= old.idx;
	res->offset	= old.cur_entry_offset;
	res->seq	= le64_to_cpu(j->buf[old.idx].data->seq);
	return 1;
}

static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
				       unsigned u64s, unsigned flags)
{
	int ret;

	EBUG_ON(res->ref);
	EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));

	res->u64s = u64s;

	if (journal_res_get_fast(j, res, flags))
		goto out;

	ret = bch2_journal_res_get_slowpath(j, res, flags);
	if (ret)
		return ret;
out:
	if (!(flags & JOURNAL_RES_GET_CHECK)) {
		lock_acquire_shared(&j->res_map, 0,
				    (flags & JOURNAL_RES_GET_NONBLOCK) != 0,
				    NULL, _THIS_IP_);
		EBUG_ON(!res->ref);
	}
	return 0;
}
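
/*
 * Typical usage of the reservation interface (a sketch; error handling is
 * elided and BTREE_ID_EXTENTS is just an illustrative btree):
 *
 *	struct journal_res res = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	bch2_journal_add_keys(j, &res, BTREE_ID_EXTENTS, k);
 *	bch2_journal_res_put(j, &res);
 */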

/* journal_preres: */

static inline bool journal_check_may_get_unreserved(struct journal *j)
{
	union journal_preres_state s = READ_ONCE(j->prereserved);
	bool ret = s.reserved <= s.remaining &&
		fifo_free(&j->pin) > 8;

	lockdep_assert_held(&j->lock);

	if (ret != test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		if (ret) {
			set_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
			journal_wake(j);
		} else {
			clear_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
		}
	}
	return ret;
}

static inline void bch2_journal_preres_put(struct journal *j,
					   struct journal_preres *res)
{
	union journal_preres_state s = { .reserved = res->u64s };

	if (!res->u64s)
		return;

	s.v = atomic64_sub_return(s.v, &j->prereserved.counter);
	res->u64s = 0;
	closure_wake_up(&j->preres_wait);

	if (s.reserved <= s.remaining &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		spin_lock(&j->lock);
		journal_check_may_get_unreserved(j);
		spin_unlock(&j->lock);
	}
}

int __bch2_journal_preres_get(struct journal *,
			      struct journal_preres *, unsigned, unsigned);

static inline int bch2_journal_preres_get_fast(struct journal *j,
					       struct journal_preres *res,
					       unsigned new_u64s,
					       unsigned flags)
{
	int d = new_u64s - res->u64s;
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;

		new.reserved += d;

		/*
		 * If we're being called from the journal reclaim path, we have
		 * to unconditionally give out the pre-reservation, there's
		 * nothing else sensible we can do - otherwise we'd recurse back
		 * into the reclaim path and deadlock:
		 */
		if (!(flags & JOURNAL_RES_GET_RECLAIM) &&
		    new.reserved > new.remaining)
			return 0;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);

	res->u64s += d;
	return 1;
}

static inline int bch2_journal_preres_get(struct journal *j,
					  struct journal_preres *res,
					  unsigned new_u64s,
					  unsigned flags)
{
	if (new_u64s <= res->u64s)
		return 0;

	if (bch2_journal_preres_get_fast(j, res, new_u64s, flags))
		return 0;

	if (flags & JOURNAL_RES_GET_NONBLOCK)
		return -EAGAIN;

	return __bch2_journal_preres_get(j, res, new_u64s, flags);
}
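
/*
 * Example (a sketch, with illustrative variables): operations that journal
 * reclaim depends on pre-reserve the journal space they will need up front,
 * so they can never block on reclaim and deadlock:
 *
 *	struct journal_preres preres = { 0 };
 *
 *	ret = bch2_journal_preres_get(j, &preres, u64s, 0);
 *	...
 *	bch2_journal_preres_put(j, &preres);
 */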

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *,
				   struct journal_entry_res *,
				   unsigned);

u64 bch2_journal_last_unwritten_seq(struct journal *);
int bch2_journal_open_seq_async(struct journal *, u64, struct closure *);

void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *);
void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
void bch2_journal_flush_async(struct journal *, struct closure *);
void bch2_journal_meta_async(struct journal *, struct closure *);

int bch2_journal_flush_seq(struct journal *, u64);
int bch2_journal_flush(struct journal *);
int bch2_journal_meta(struct journal *);

void bch2_journal_halt(struct journal *);

static inline int bch2_journal_error(struct journal *j)
{
	return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
		? -EIO : 0;
}

struct bch_dev;

static inline bool journal_flushes_device(struct bch_dev *ca)
{
	return true;
}

static inline void bch2_journal_set_replay_done(struct journal *j)
{
	BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
	set_bit(JOURNAL_REPLAY_DONE, &j->flags);
}

void bch2_journal_unblock(struct journal *);
void bch2_journal_block(struct journal *);

ssize_t bch2_journal_print_debug(struct journal *, char *);
ssize_t bch2_journal_print_pins(struct journal *, char *);

int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
				unsigned);
int bch2_dev_journal_alloc(struct bch_dev *);

void bch2_dev_journal_stop(struct journal *, struct bch_dev *);

void bch2_fs_journal_stop(struct journal *);
int bch2_fs_journal_start(struct journal *, u64, struct list_head *);

void bch2_dev_journal_exit(struct bch_dev *);
int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
void bch2_fs_journal_exit(struct journal *);
int bch2_fs_journal_init(struct journal *);

#endif /* _BCACHEFS_JOURNAL_H */