#ifndef _BCACHE_JOURNAL_H
#define _BCACHE_JOURNAL_H

/*
 * The primary purpose of the journal is to log updates (insertions) to the
 * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
 *
 * Without the journal, the b-tree is always internally consistent on
 * disk - and in fact, in the earliest incarnations bcache didn't have a journal
 * but did handle unclean shutdowns by doing all index updates synchronously.
 *
 * Updates to interior nodes still happen synchronously and without the journal
 * (for simplicity) - this may change eventually, but updates to interior nodes
 * are rare enough that it's not a huge priority.
 *
 * This means the journal is relatively separate from the b-tree; it consists of
 * just a list of keys, and journal replay consists of just redoing those
 * insertions in the same order that they appear in the journal.
 *
 * For synchronous updates (where we're waiting on the index update to hit
 * disk), the journal entry will be written out immediately (or as soon as
 * possible, if the write for the previous journal entry was still in flight).
 *
 * Synchronous updates are specified by passing a closure (@flush_cl) to
 * bch_btree_insert() or bch_btree_insert_node(), which then passes that
 * parameter down to the journalling code. That closure will wait on the
 * journal write to complete (via closure_wait()).
 *
 * If the index update wasn't synchronous, the journal entry will be
 * written out after 10 ms have elapsed, by default (the delay_ms field
 * in struct journal).
 *
 * A journal entry is variable size (struct jset): it has a fixed-length header
 * and then a variable number of struct jset_entry entries.
 *
 * Journal entries are identified by monotonically increasing 64-bit sequence
 * numbers - jset->seq; other places in the code refer to this sequence number.
 *
 * A jset_entry entry contains one or more bkeys (which is what gets inserted
 * into the b-tree). We need a container to indicate which b-tree the key is
 * for; also, the roots of the various b-trees are stored in jset_entry entries
 * (one for each b-tree) - this lets us add new b-tree types without changing
 * the on disk format.
 *
 * We also keep some things in the journal header that are logically part of the
 * superblock - all the things that are frequently updated. This is for future
 * bcache on raw flash support; the superblock (which will become another
 * journal) can't be moved or wear leveled, so it contains just enough
 * information to find the main journal, and the superblock only has to be
 * rewritten when we want to move/wear level the main journal.
 *
 * JOURNAL LAYOUT ON DISK:
 *
 * The journal is written to a ringbuffer of buckets (which is kept in the
 * superblock); the individual buckets are not necessarily contiguous on disk,
 * which means that journal entries are not allowed to span buckets, but also
 * that we can resize the journal at runtime if desired (unimplemented).
 *
 * The journal buckets exist in the same pool as all the other buckets that are
 * managed by the allocator and garbage collection - garbage collection marks
 * the journal buckets as metadata buckets.
 *
 * OPEN/DIRTY JOURNAL ENTRIES:
 *
 * Open/dirty journal entries are journal entries that contain b-tree updates
 * that have not yet been written out to the b-tree on disk. We have to track
 * which journal entries are dirty, and we also have to avoid wrapping around
 * the journal and overwriting old but still dirty journal entries with new
 * ones.
 *
 * On disk, this is represented with the "last_seq" field of struct jset;
 * last_seq is the first sequence number that journal replay has to replay.
 *
 * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
 * journal_device->seq) from each journal bucket to the highest sequence number
 * of any journal entry it contains. Then, by comparing that against last_seq,
 * we can determine whether that journal bucket contains dirty journal entries
 * or not.
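 *
 * For example (an illustrative scenario, not taken from the code): if last_seq
 * is 100 and a bucket's highest sequence number is 95, every entry in that
 * bucket has already been flushed to the b-tree and the bucket may be reused;
 * if it were 105, the bucket would still contain dirty entries.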
 *
 * To track which journal entries are dirty, we maintain a fifo of refcounts
 * (where each entry corresponds to a specific sequence number) - when a ref
 * goes to 0, that journal entry is no longer dirty.
 *
 * Journalling of index updates is done at the same time as the b-tree itself is
 * being modified (see btree_insert_key()); when we add the key to the journal,
 * the pending b-tree write takes a ref on the journal entry the key was added
 * to. If a pending b-tree write would need to take refs on multiple dirty
 * journal entries, it only keeps the ref on the oldest one (since a newer
 * journal entry will still be replayed if an older entry was dirty).
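 *
 * For example (illustrative): a b-tree node with unwritten keys in journal
 * entries 5 and 7 only needs a ref on entry 5 - replay starts at the oldest
 * dirty entry, so replaying from 5 necessarily replays 7 as well.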
 *
 * There are two ways the journal could fill up; either we could run out of
 * space to write to, or we could have too many open journal entries and run out
 * of room in the fifo of refcounts. Since those refcounts are decremented
 * without any locking we can't safely resize that fifo, so we handle it the
 * same way.
 *
 * If the journal fills up, we start flushing dirty btree nodes until we can
 * allocate space for a journal write again - preferentially flushing btree
 * nodes that are pinning the oldest journal entries first.
 */

#include <linux/hash.h>

#include "journal_types.h"

static inline struct jset_entry *jset_keys_next(struct jset_entry *j)
{
	return (void *) __bkey_idx(j, le16_to_cpu(j->u64s));
}
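
/*
 * Illustrative sketch (not part of the interface): jset_keys_next() is the
 * step function for walking a jset's entries. Assuming the entries start at
 * jset->start and jset->u64s (__le32, as in the on disk format headers) counts
 * the u64s of all entries, a walk would look like:
 *
 *	struct jset_entry *entry;
 *
 *	for (entry = jset->start;
 *	     entry < (struct jset_entry *) __bkey_idx(jset, le32_to_cpu(jset->u64s));
 *	     entry = jset_keys_next(entry))
 *		process(entry);
 */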

/*
 * Only used for holding the journal entries we read in btree_journal_read()
 * during cache registration
 */
struct journal_replay {
	struct list_head	list;
	struct jset		j;
};

#define JOURNAL_PIN	((32 * 1024) - 1)

static inline bool journal_pin_active(struct journal_entry_pin *pin)
{
	return pin->pin_list != NULL;
}

void bch_journal_pin_add(struct journal *, struct journal_entry_pin *,
			 journal_pin_flush_fn);
void bch_journal_pin_drop(struct journal *, struct journal_entry_pin *);
void bch_journal_pin_add_if_older(struct journal *,
				  struct journal_entry_pin *,
				  struct journal_entry_pin *,
				  journal_pin_flush_fn);
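
/*
 * Illustrative sketch (assumed usage, not taken from the callers; the flush
 * callback signature and the journal_pin field name are assumptions): a
 * pending b-tree node write pins the journal entry its keys landed in, and
 * drops the pin once the node has been written:
 *
 *	static void btree_node_flush(struct journal *j,
 *				     struct journal_entry_pin *pin)
 *	{
 *		// journal wants this entry freed up: write out the btree node
 *	}
 *
 *	bch_journal_pin_add(j, &b->journal_pin, btree_node_flush);
 *	...
 *	bch_journal_pin_drop(j, &b->journal_pin);
 */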

struct bkey_i *bch_journal_find_btree_root(struct cache_set *, struct jset *,
					   enum btree_id, unsigned *);

int bch_journal_seq_should_ignore(struct cache_set *, u64, struct btree *);

u64 bch_inode_journal_seq(struct journal *, u64);

static inline int journal_state_count(union journal_res_state s, int idx)
{
	return idx == 0 ? s.buf0_count : s.buf1_count;
}

static inline void journal_state_inc(union journal_res_state *s)
{
	s->buf0_count += s->idx == 0;
	s->buf1_count += s->idx == 1;
}
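
/*
 * has_inode is a Bloom-filter-like summary: a single bit indexed by a hash of
 * the inode number, so it can say "this journal entry may contain keys for
 * this inode" but never that it definitely doesn't.
 */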
static inline void bch_journal_set_has_inode(struct journal_buf *buf, u64 inum)
{
	set_bit(hash_64(inum, ilog2(sizeof(buf->has_inode) * 8)), buf->has_inode);
}

/*
 * Amount of space that will be taken up by some keys in the journal (i.e.
 * including the jset_entry header)
 */
static inline unsigned jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct jset_entry) / sizeof(u64);
}
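
/*
 * For example: journalling a key k costs jset_u64s(k->k.u64s) u64s of journal
 * space - the key itself plus the jset_entry header it's wrapped in (this is
 * the "actual" size that bch_journal_add_keys() below charges against the
 * reservation).
 */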

static inline void bch_journal_add_entry_at(struct journal_buf *buf,
					    const void *data, size_t u64s,
					    unsigned type, enum btree_id id,
					    unsigned level, unsigned offset)
{
	struct jset_entry *entry = bkey_idx(buf->data, offset);

	entry->u64s = cpu_to_le16(u64s);
	entry->btree_id = id;
	entry->level = level;

	SET_JOURNAL_ENTRY_TYPE(entry, type);

	memcpy_u64s(entry->_data, data, u64s);
}

static inline void bch_journal_add_keys(struct journal *j, struct journal_res *res,
					enum btree_id id, const struct bkey_i *k)
{
	struct journal_buf *buf = &j->buf[res->idx];
	unsigned actual = jset_u64s(k->k.u64s);

	BUG_ON(actual > res->u64s);

	bch_journal_set_has_inode(buf, k->k.p.inode);

	bch_journal_add_entry_at(buf, k, k->k.u64s,
				 JOURNAL_ENTRY_BTREE_KEYS, id,
				 0, res->offset);

	res->offset += actual;
	res->u64s -= actual;
}

void bch_journal_buf_put_slowpath(struct journal *, bool);

static inline void bch_journal_buf_put(struct journal *j, unsigned idx,
				       bool need_write_just_set)
{
	union journal_res_state s;

	s.v = atomic64_sub_return(((union journal_res_state) {
		.buf0_count = idx == 0,
		.buf1_count = idx == 1,
	}).v, &j->reservations.counter);

	EBUG_ON(s.idx != idx && !s.prev_buf_unwritten);

	/*
	 * Do not initiate a journal write if the journal is in an error state
	 * (previous journal entry write may have failed)
	 */
	if (s.idx != idx &&
	    !journal_state_count(s, idx) &&
	    s.cur_entry_offset != JOURNAL_ENTRY_ERROR_VAL)
		bch_journal_buf_put_slowpath(j, need_write_just_set);
}

/*
 * This function releases the journal write structure so other threads can
 * then proceed to add their keys as well.
 */
static inline void bch_journal_res_put(struct journal *j,
				       struct journal_res *res)
{
	if (!res->ref)
		return;

	lock_release(&j->res_map, 0, _RET_IP_);

	while (res->u64s) {
		bch_journal_add_entry_at(&j->buf[res->idx], NULL, 0,
					 JOURNAL_ENTRY_BTREE_KEYS,
					 0, 0, res->offset);
		res->offset += jset_u64s(0);
		res->u64s -= jset_u64s(0);
	}

	bch_journal_buf_put(j, res->idx, false);

	res->ref = false;
}

int bch_journal_res_get_slowpath(struct journal *, struct journal_res *,
				 unsigned, unsigned);

static inline int journal_res_get_fast(struct journal *j,
				       struct journal_res *res,
				       unsigned u64s_min,
				       unsigned u64s_max)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;

		/*
		 * Check if there is still room in the current journal
		 * entry:
		 */
		if (old.cur_entry_offset + u64s_min > j->cur_entry_u64s)
			return 0;

		res->offset = old.cur_entry_offset;
		res->u64s = min(u64s_max, j->cur_entry_u64s -
				old.cur_entry_offset);

		journal_state_inc(&new);
		new.cur_entry_offset += res->u64s;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	res->ref = true;
	res->idx = new.idx;
	res->seq = le64_to_cpu(j->buf[res->idx].data->seq);
	return 1;
}

static inline int bch_journal_res_get(struct journal *j, struct journal_res *res,
				      unsigned u64s_min, unsigned u64s_max)
{
	int ret;

	EBUG_ON(u64s_max < u64s_min);

	if (journal_res_get_fast(j, res, u64s_min, u64s_max))
		goto out;

	ret = bch_journal_res_get_slowpath(j, res, u64s_min, u64s_max);
	if (ret)
		return ret;
out:
	lock_acquire_shared(&j->res_map, 0, 0, NULL, _THIS_IP_);
	return 0;
}
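
/*
 * Illustrative sketch (assumed usage, derived from the helpers above rather
 * than from actual callers; the names k and id are assumptions): journalling
 * a single key insert looks roughly like:
 *
 *	struct journal_res res = { 0 };
 *	unsigned u64s = jset_u64s(k->k.u64s);
 *	int ret;
 *
 *	ret = bch_journal_res_get(j, &res, u64s, u64s);
 *	if (ret)
 *		return ret;
 *
 *	bch_journal_add_keys(j, &res, id, k);
 *	bch_journal_res_put(j, &res);
 */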

void bch_journal_wait_on_seq(struct journal *, u64, struct closure *);
void bch_journal_flush_seq_async(struct journal *, u64, struct closure *);
void bch_journal_flush_async(struct journal *, struct closure *);
void bch_journal_meta_async(struct journal *, struct closure *);

int bch_journal_flush_seq(struct journal *, u64);
int bch_journal_flush(struct journal *);
int bch_journal_meta(struct journal *);

void bch_journal_halt(struct journal *);

static inline int bch_journal_error(struct journal *j)
{
	return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
		? -EIO : 0;
}

static inline bool is_journal_device(struct cache *ca)
{
	return ca->mi.state == CACHE_ACTIVE && ca->mi.tier == 0;
}

static inline bool journal_flushes_device(struct cache *ca)
{
	return true;
}

void bch_journal_start(struct cache_set *);
void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_entries_free(struct list_head *);
int bch_journal_read(struct cache_set *, struct list_head *);
int bch_journal_replay(struct cache_set *, struct list_head *);

static inline void bch_journal_set_replay_done(struct journal *j)
{
	spin_lock(&j->lock);
	BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));

	set_bit(JOURNAL_REPLAY_DONE, &j->flags);
	j->cur_pin_list = &fifo_peek_back(&j->pin);
	spin_unlock(&j->lock);
}

void bch_journal_free(struct journal *);
int bch_journal_alloc(struct journal *, unsigned);

ssize_t bch_journal_print_debug(struct journal *, char *);

int bch_cache_journal_alloc(struct cache *);

static inline __le64 *__journal_buckets(struct cache_sb *sb)
{
	return sb->_data + bch_journal_buckets_offset(sb);
}

static inline u64 journal_bucket(struct cache_sb *sb, unsigned nr)
{
	return le64_to_cpu(__journal_buckets(sb)[nr]);
}

static inline void set_journal_bucket(struct cache_sb *sb, unsigned nr, u64 bucket)
{
	__journal_buckets(sb)[nr] = cpu_to_le64(bucket);
}

int bch_journal_move(struct cache *);

#endif /* _BCACHE_JOURNAL_H */