#ifndef _BCACHEFS_JOURNAL_H
#define _BCACHEFS_JOURNAL_H

/*
 * THE JOURNAL:
 *
 * The primary purpose of the journal is to log updates (insertions) to the
 * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
 *
 * Without the journal, the b-tree is always internally consistent on
 * disk - and in fact, in the earliest incarnations bcache didn't have a journal
 * but did handle unclean shutdowns by doing all index updates synchronously
 * (with coalescing).
 *
 * Updates to interior nodes still happen synchronously and without the journal
 * (for simplicity) - this may change eventually, but updates to interior nodes
 * are rare enough that it's not a huge priority.
 *
 * This means the journal is relatively separate from the b-tree; it consists of
 * just a list of keys, and journal replay consists of just redoing those
 * insertions in the same order that they appear in the journal.
 *
 * PERSISTENCE:
 *
 * For synchronous updates (where we're waiting on the index update to hit
 * disk), the journal entry will be written out immediately (or as soon as
 * possible, if the write for the previous journal entry is still in flight).
 *
 * Synchronous updates are specified by passing a closure (@flush_cl) to
 * bch2_btree_insert() or bch2_btree_insert_node(), which then pass that parameter
 * down to the journalling code. That closure will wait on the journal write to
 * complete (via closure_wait()).
 *
 * If the index update wasn't synchronous, the journal entry will be
 * written out after 10 ms have elapsed, by default (the delay_ms field
 * in struct journal).
 *
 * JOURNAL ENTRIES:
 *
 * A journal entry is variable size (struct jset): it's got a fixed-length
 * header followed by a variable number of struct jset_entry entries.
 *
 * Journal entries are identified by monotonically increasing 64-bit sequence
 * numbers - jset->seq; other places in the code refer to this sequence number.
 *
 * A jset_entry entry contains one or more bkeys (which is what gets inserted
 * into the b-tree). We need a container to indicate which b-tree the key is
 * for; also, the roots of the various b-trees are stored in jset_entry entries
 * (one for each b-tree) - this lets us add new b-tree types without changing
 * the on-disk format.
 *
 * We also keep some things in the journal header that are logically part of the
 * superblock - all the things that are frequently updated. This is for future
 * bcache on raw flash support; the superblock (which will become another
 * journal) can't be moved or wear leveled, so it contains just enough
 * information to find the main journal, and the superblock only has to be
 * rewritten when we want to move/wear level the main journal.
 *
 * JOURNAL LAYOUT ON DISK:
 *
 * The journal is written to a ringbuffer of buckets (which is kept in the
 * superblock); the individual buckets are not necessarily contiguous on disk,
 * which means that journal entries are not allowed to span buckets, but also
 * that we can resize the journal at runtime if desired (unimplemented).
 *
 * The journal buckets exist in the same pool as all the other buckets that are
 * managed by the allocator and garbage collection - garbage collection marks
 * the journal buckets as metadata buckets.
 *
 * OPEN/DIRTY JOURNAL ENTRIES:
 *
 * Open/dirty journal entries are journal entries that contain b-tree updates
 * that have not yet been written out to the b-tree on disk. We have to track
 * which journal entries are dirty, and we also have to avoid wrapping around
 * the journal and overwriting old but still dirty journal entries with new
 * journal entries.
 *
 * On disk, this is represented with the "last_seq" field of struct jset;
 * last_seq is the first sequence number that journal replay has to replay.
 *
 * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
 * journal_device->seq) from each journal bucket to the highest sequence number
 * of any journal entry it contains. Then, by comparing that against last_seq,
 * we can determine whether that journal bucket contains dirty journal entries
 * or not.
 *
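 * For example, if last_seq is 10 and the newest journal entry is 13, entries
 * 10 through 13 are dirty: a bucket whose highest contained sequence number
 * is 9 can be reused, while a bucket containing an entry at 11 cannot.
 *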
 * To track which journal entries are dirty, we maintain a fifo of refcounts
 * (where each entry corresponds to a specific sequence number) - when a ref
 * goes to 0, that journal entry is no longer dirty.
 *
 * Journalling of index updates is done at the same time as the b-tree itself is
 * being modified (see btree_insert_key()); when we add the key to the journal,
 * the pending b-tree write takes a ref on the journal entry the key was added
 * to. If a pending b-tree write would need to take refs on multiple dirty
 * journal entries, it only keeps the ref on the oldest one (since a newer
 * journal entry will still be replayed if an older entry was dirty).
 *
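 * For example, a b-tree node with dirty keys from journal entries 11 and 13
 * holds a ref only on entry 11; replaying from 11 would redo the keys in 13
 * anyway, so a single ref per pending write is enough.
 *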
 * JOURNAL FILLING UP:
 *
 * There are two ways the journal could fill up; either we could run out of
 * space to write to, or we could have too many open journal entries and run out
 * of room in the fifo of refcounts. Since those refcounts are decremented
 * without any locking we can't safely resize that fifo, so we handle it the
 * same way.
 *
 * If the journal fills up, we start flushing dirty btree nodes until we can
 * allocate space for a journal write again - preferentially flushing btree
 * nodes that are pinning the oldest journal entries first.
 */

#include <linux/hash.h>

#include "journal_types.h"

/* forward declarations for types used in prototypes below: */
struct bch_fs;
struct bch_dev;

static inline void journal_wake(struct journal *j)
{
	wake_up(&j->wait);
	closure_wake_up(&j->async_wait);
}

static inline struct journal_buf *journal_cur_buf(struct journal *j)
{
	return j->buf + j->reservations.idx;
}

static inline struct journal_buf *journal_prev_buf(struct journal *j)
{
	return j->buf + !j->reservations.idx;
}

/* Sequence number of oldest dirty journal entry */

static inline u64 journal_last_seq(struct journal *j)
{
	return j->pin.front;
}

static inline u64 journal_cur_seq(struct journal *j)
{
	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	return j->pin.back - 1;
}

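/*
 * Dirty journal entries thus span [journal_last_seq(j), journal_cur_seq(j)];
 * the pin fifo holds one refcount entry per sequence number in that range.
 */
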
u64 bch2_inode_journal_seq(struct journal *, u64);

static inline int journal_state_count(union journal_res_state s, int idx)
{
	return idx == 0 ? s.buf0_count : s.buf1_count;
}

static inline void journal_state_inc(union journal_res_state *s)
{
	s->buf0_count += s->idx == 0;
	s->buf1_count += s->idx == 1;
}

static inline void bch2_journal_set_has_inode(struct journal *j,
					      struct journal_res *res,
					      u64 inum)
{
	struct journal_buf *buf = &j->buf[res->idx];
	unsigned long bit = hash_64(inum, ilog2(sizeof(buf->has_inode) * 8));

	/* avoid atomic op if possible */
	if (unlikely(!test_bit(bit, buf->has_inode)))
		set_bit(bit, buf->has_inode);
}

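/*
 * Note: has_inode behaves as a one-hash Bloom filter over inode numbers; a set
 * bit means a key for an inode hashing to that bit *may* be in this buffer, so
 * false positives are possible but false negatives are not (presumably
 * consulted via bch2_inode_journal_seq() above).
 */
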
/*
 * Amount of space that will be taken up by some keys in the journal (i.e.
 * including the jset_entry header)
 */
static inline unsigned jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct jset_entry) / sizeof(u64);
}

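/*
 * For example, assuming an 8-byte struct jset_entry header (one u64), a bkey
 * of 3 u64s consumes jset_u64s(3) == 4 u64s of journal space.
 */
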
static inline struct jset_entry *
bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
{
	struct jset *jset = buf->data;
	struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));

	memset(entry, 0, sizeof(*entry));
	entry->u64s = cpu_to_le16(u64s);

	le32_add_cpu(&jset->u64s, jset_u64s(u64s));

	return entry;
}

static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
					  unsigned type, enum btree_id id,
					  unsigned level,
					  const void *data, unsigned u64s)
{
	struct journal_buf *buf = &j->buf[res->idx];
	struct jset_entry *entry = vstruct_idx(buf->data, res->offset);
	unsigned actual = jset_u64s(u64s);

	EBUG_ON(!res->ref);
	EBUG_ON(actual > res->u64s);

	/* consume part of this reservation: */
	res->offset += actual;
	res->u64s -= actual;

	memset(entry, 0, sizeof(*entry));
	entry->u64s = cpu_to_le16(u64s);
	entry->type = type;
	entry->btree_id = id;
	entry->level = level;
	memcpy_u64s(entry->_data, data, u64s);
}

static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
					 enum btree_id id, const struct bkey_i *k)
{
	bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
			       id, 0, k, k->k.u64s);
}

void bch2_journal_buf_put_slowpath(struct journal *, bool);

static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
					bool need_write_just_set)
{
	union journal_res_state s;

	s.v = atomic64_sub_return(((union journal_res_state) {
				    .buf0_count = idx == 0,
				    .buf1_count = idx == 1,
				    }).v, &j->reservations.counter);

	EBUG_ON(s.idx != idx && !s.prev_buf_unwritten);

	/*
	 * Do not initiate a journal write if the journal is in an error state
	 * (previous journal entry write may have failed)
	 */
	if (s.idx != idx &&
	    !journal_state_count(s, idx) &&
	    s.cur_entry_offset != JOURNAL_ENTRY_ERROR_VAL)
		bch2_journal_buf_put_slowpath(j, need_write_just_set);
}

/*
 * This function releases the journal write structure so other threads can
 * then proceed to add their keys as well.
 */
static inline void bch2_journal_res_put(struct journal *j,
					struct journal_res *res)
{
	if (!res->ref)
		return;

	lock_release(&j->res_map, 0, _RET_IP_);

	/* fill any unused part of the reservation with empty entries: */
	while (res->u64s)
		bch2_journal_add_entry(j, res,
				       BCH_JSET_ENTRY_btree_keys,
				       0, 0, NULL, 0);

	bch2_journal_buf_put(j, res->idx, false);

	res->ref = 0;
}

int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
				  unsigned, unsigned);

static inline int journal_res_get_fast(struct journal *j,
				       struct journal_res *res,
				       unsigned u64s_min,
				       unsigned u64s_max)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	/* lockless reservation: retry the cmpxchg until we win the race */
	do {
		old.v = new.v = v;

		/*
		 * Check if there is still room in the current journal
		 * entry:
		 */
		if (old.cur_entry_offset + u64s_min > j->cur_entry_u64s)
			return 0;

		res->offset = old.cur_entry_offset;
		res->u64s = min(u64s_max, j->cur_entry_u64s -
				old.cur_entry_offset);

		journal_state_inc(&new);
		new.cur_entry_offset += res->u64s;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	res->ref = true;
	res->idx = new.idx;
	res->seq = le64_to_cpu(j->buf[res->idx].data->seq);
	return 1;
}

static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
				       unsigned u64s_min, unsigned u64s_max)
{
	int ret;

	EBUG_ON(res->ref);
	EBUG_ON(u64s_max < u64s_min);
	EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));

	if (journal_res_get_fast(j, res, u64s_min, u64s_max))
		goto out;

	ret = bch2_journal_res_get_slowpath(j, res, u64s_min, u64s_max);
	if (ret)
		return ret;
out:
	lock_acquire_shared(&j->res_map, 0, 0, NULL, _THIS_IP_);
	EBUG_ON(!res->ref);
	return 0;
}

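/*
 * Typical usage (a sketch, not a verbatim caller: error handling and the
 * surrounding b-tree locking are elided, and "id" and "k" are placeholder
 * variables):
 *
 *	struct journal_res res = { 0 };
 *	unsigned u64s = jset_u64s(k->k.u64s);
 *	int ret = bch2_journal_res_get(j, &res, u64s, u64s);
 *
 *	if (ret)
 *		return ret;
 *	bch2_journal_add_keys(j, &res, id, k);
 *	bch2_journal_res_put(j, &res);
 */
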
u64 bch2_journal_last_unwritten_seq(struct journal *);
int bch2_journal_open_seq_async(struct journal *, u64, struct closure *);

void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *);
void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
void bch2_journal_flush_async(struct journal *, struct closure *);
void bch2_journal_meta_async(struct journal *, struct closure *);

int bch2_journal_flush_seq(struct journal *, u64);
int bch2_journal_flush(struct journal *);
int bch2_journal_meta(struct journal *);

void bch2_journal_halt(struct journal *);

static inline int bch2_journal_error(struct journal *j)
{
	return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
		? -EIO : 0;
}

static inline bool journal_flushes_device(struct bch_dev *ca)
{
	return true;
}

int bch2_journal_mark(struct bch_fs *, struct list_head *);
void bch2_journal_entries_free(struct list_head *);
int bch2_journal_replay(struct bch_fs *, struct list_head *);

static inline void bch2_journal_set_replay_done(struct journal *j)
{
	BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
	set_bit(JOURNAL_REPLAY_DONE, &j->flags);
}

ssize_t bch2_journal_print_debug(struct journal *, char *);
ssize_t bch2_journal_print_pins(struct journal *, char *);

int bch2_dev_journal_alloc(struct bch_dev *);

void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
void bch2_fs_journal_stop(struct journal *);
void bch2_fs_journal_start(struct journal *);
void bch2_dev_journal_exit(struct bch_dev *);
int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
void bch2_fs_journal_exit(struct journal *);
int bch2_fs_journal_init(struct journal *);

#endif /* _BCACHEFS_JOURNAL_H */