1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_JOURNAL_TYPES_H
3 #define _BCACHEFS_JOURNAL_TYPES_H
5 #include <linux/cache.h>
6 #include <linux/workqueue.h>
8 #include "alloc_types.h"
9 #include "super_types.h"
/*
 * Number of journal write buffers kept in struct journal (writes staged or in
 * flight); a power of two so indices can be wrapped with JOURNAL_BUF_MASK:
 */
#define JOURNAL_BUF_BITS	2
#define JOURNAL_BUF_NR		(1U << JOURNAL_BUF_BITS)
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)
 * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes to
18 * the journal that are being staged or in flight.
23 __BKEY_PADDED(key, BCH_REPLICAS_MAX);
25 struct closure_waitlist wait;
27 unsigned buf_size; /* size in bytes of @data */
28 unsigned sectors; /* maximum size for current entry */
29 unsigned disk_sectors; /* maximum size entry could have been, if
30 buf_size was bigger */
31 unsigned u64s_reserved;
32 bool noflush; /* write has already been kicked off, and was noflush */
33 bool must_flush; /* something wants a flush */
36 unsigned long has_inode[1024 / sizeof(unsigned long)];
40 * Something that makes a journal entry dirty - i.e. a btree node that has to be
struct journal_entry_pin_list {
	struct list_head	list;		/* pins not yet flushed — presumably; confirm */
	struct list_head	flushed;	/* pins whose flush callback has run — presumably */
	struct bch_devs_list	devs;		/* NOTE(review): looks like devices holding this entry — confirm */
struct journal_entry_pin;
/*
 * Flush callback for a journal_entry_pin: invoked to write out whatever is
 * keeping the pinned journal entry dirty. The u64 argument is presumably the
 * sequence number being flushed up to — confirm against callers.
 */
typedef void (*journal_pin_flush_fn)(struct journal *j,
				struct journal_entry_pin *, u64);
struct journal_entry_pin {
	struct list_head	list;	/* entry on a journal_entry_pin_list — presumably; confirm */
	journal_pin_flush_fn	flush;	/* callback that flushes/releases this pin */
71 * For reserving space in the journal prior to getting a reservation on a
72 * particular journal entry:
74 struct journal_preres {
78 union journal_res_state {
88 u64 cur_entry_offset:20,
98 union journal_preres_state {
/* Min/max size of a single journal entry: */
#define JOURNAL_ENTRY_SIZE_MIN		(64U << 10)		/* 64k */
#define JOURNAL_ENTRY_SIZE_MAX		(4U << 20)		/* 4M */
 * We stash some journal state as sentinel values in cur_entry_offset:
 * note - cur_entry_offset is in units of u64s
#define JOURNAL_ENTRY_OFFSET_MAX	((1U << 20) - 1)	/* max of the 20 bit cur_entry_offset bitfield */
#define JOURNAL_ENTRY_CLOSED_VAL	(JOURNAL_ENTRY_OFFSET_MAX - 1)	/* sentinel: entry closed */
#define JOURNAL_ENTRY_ERROR_VAL		(JOURNAL_ENTRY_OFFSET_MAX)	/* sentinel: error state */
struct journal_space {
	/* Units of 512 byte sectors: */
	unsigned	next_entry; /* How big the next journal entry can be */
132 enum journal_space_from {
133 journal_space_discarded,
134 journal_space_clean_ondisk,
141 * JOURNAL_NEED_WRITE - current (pending) journal entry should be written ASAP,
142 * either because something's waiting on the write to complete or because it's
143 * been dirty too long and the timer's expired.
149 JOURNAL_RECLAIM_STARTED,
151 JOURNAL_MAY_GET_UNRESERVED,
152 JOURNAL_MAY_SKIP_FLUSH,
155 /* Embedded in struct bch_fs */
157 /* Fastpath stuff up front: */
161 union journal_res_state reservations;
163 /* Max size of current journal entry */
164 unsigned cur_entry_u64s;
165 unsigned cur_entry_sectors;
168 * 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
169 * insufficient devices:
174 cur_entry_journal_full,
175 cur_entry_journal_pin_full,
176 cur_entry_journal_stuck,
177 cur_entry_insufficient_devices,
180 union journal_preres_state prereserved;
182 /* Reserved space in journal entry to be used just prior to write */
183 unsigned entry_u64s_reserved;
185 unsigned buf_size_want;
188 * Two journal entries -- one is currently open for new entries, the
189 * other is possibly being written out.
191 struct journal_buf buf[JOURNAL_BUF_NR];
195 /* if nonzero, we may not open a new journal entry: */
198 /* Used when waiting because the journal was full */
199 wait_queue_head_t wait;
200 struct closure_waitlist async_wait;
201 struct closure_waitlist preres_wait;
204 struct delayed_work write_work;
206 /* Sequence number of most recent journal entry (last entry in @pin) */
209 /* seq, last_seq from the most recent journal entry successfully written */
211 u64 flushed_seq_ondisk;
217 * FIFO of journal entries whose btree updates have not yet been
220 * Each entry is a reference count. The position in the FIFO is the
221 * entry's sequence number relative to @seq.
223 * The journal entry itself holds a reference count, put when the
224 * journal entry is written out. Each btree node modified by the journal
225 * entry also holds a reference count, put when the btree node is
228 * When a reference count reaches zero, the journal entry is no longer
229 * needed. When all journal entries in the oldest journal bucket are no
230 * longer needed, the bucket can be discarded and reused.
233 u64 front, back, size, mask;
234 struct journal_entry_pin_list *data;
237 struct journal_space space[journal_space_nr];
239 u64 replay_journal_seq;
240 u64 replay_journal_seq_end;
242 struct write_point wp;
245 struct mutex reclaim_lock;
246 struct task_struct *reclaim_thread;
248 u64 nr_direct_reclaim;
249 u64 nr_background_reclaim;
251 unsigned long last_flushed;
252 struct journal_entry_pin *flush_in_progress;
253 wait_queue_head_t pin_flush_wait;
255 /* protects advancing ja->discard_idx: */
256 struct mutex discard_lock;
259 unsigned write_delay_ms;
260 unsigned reclaim_delay_ms;
261 unsigned long last_flush_write;
263 u64 res_get_blocked_start;
265 u64 write_start_time;
268 u64 nr_noflush_writes;
270 struct time_stats *write_time;
271 struct time_stats *delay_time;
272 struct time_stats *blocked_time;
273 struct time_stats *flush_seq_time;
275 #ifdef CONFIG_DEBUG_LOCK_ALLOC
276 struct lockdep_map res_map;
281 * Embedded in struct bch_dev. First three fields refer to the array of journal
282 * buckets, in bch_sb.
284 struct journal_device {
286 * For each journal bucket, contains the max sequence number of the
287 * journal writes it contains - so we know when a bucket can be reused.
291 unsigned sectors_free;
294 * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx:
296 unsigned discard_idx; /* Next bucket to discard */
297 unsigned dirty_idx_ondisk;
299 unsigned cur_idx; /* Journal bucket we're currently writing to */
304 /* Bio for journal reads/writes to this device */
307 /* for bch_journal_read_device */
312 * journal_entry_res - reserve space in every journal entry:
314 struct journal_entry_res {
318 #endif /* _BCACHEFS_JOURNAL_TYPES_H */