1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_JOURNAL_TYPES_H
3 #define _BCACHEFS_JOURNAL_TYPES_H
5 #include <linux/cache.h>
6 #include <linux/workqueue.h>
8 #include "alloc_types.h"
9 #include "super_types.h"
/*
 * Ring of journal write buffers (see struct journal's buf[] array):
 * JOURNAL_BUF_NR entries, kept a power of two so buffer indices can be
 * wrapped cheaply with JOURNAL_BUF_MASK.
 */
#define JOURNAL_BUF_BITS	2
#define JOURNAL_BUF_NR		(1U << JOURNAL_BUF_BITS)
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)
17 * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes to
18 * the journal that are being staged or in flight.
23 __BKEY_PADDED(key, BCH_REPLICAS_MAX);
24 struct bch_devs_list devs_written;
26 struct closure_waitlist wait;
27 u64 last_seq; /* copy of data->last_seq */
31 unsigned buf_size; /* size in bytes of @data */
32 unsigned sectors; /* maximum size for current entry */
33 unsigned disk_sectors; /* maximum size entry could have been, if
34 buf_size was bigger */
35 unsigned u64s_reserved;
36 bool noflush; /* write has already been kicked off, and was noflush */
37 bool must_flush; /* something wants a flush */
42 * Something that makes a journal entry dirty - i.e. a btree node that has to be
46 struct journal_entry_pin_list {
47 struct list_head list;
48 struct list_head key_cache_list;
49 struct list_head flushed;
51 struct bch_devs_list devs;
55 struct journal_entry_pin;
56 typedef int (*journal_pin_flush_fn)(struct journal *j,
57 struct journal_entry_pin *, u64);
59 struct journal_entry_pin {
60 struct list_head list;
61 journal_pin_flush_fn flush;
74 * For reserving space in the journal prior to getting a reservation on a
75 * particular journal entry:
77 struct journal_preres {
81 union journal_res_state {
91 u64 cur_entry_offset:20,
101 union journal_preres_state {
/* Bounds on the size, in bytes, of a single journal entry: */
#define JOURNAL_ENTRY_SIZE_MIN	(64U << 10)	/* 64k */
#define JOURNAL_ENTRY_SIZE_MAX	(4U << 20)	/* 4M */
122 * We stash some journal state as sentinel values in cur_entry_offset:
123 * note - cur_entry_offset is in units of u64s
/*
 * cur_entry_offset is a 20-bit bitfield (units of u64s), so these sentinels
 * occupy the top two values of its range and can never collide with a real
 * offset:
 */
#define JOURNAL_ENTRY_OFFSET_MAX	((1U << 20) - 1)

#define JOURNAL_ENTRY_CLOSED_VAL	(JOURNAL_ENTRY_OFFSET_MAX - 1)
#define JOURNAL_ENTRY_ERROR_VAL		(JOURNAL_ENTRY_OFFSET_MAX)
130 struct journal_space {
131 /* Units of 512 byte sectors: */
132 unsigned next_entry; /* How big the next journal entry can be */
136 enum journal_space_from {
137 journal_space_discarded,
138 journal_space_clean_ondisk,
147 JOURNAL_MAY_GET_UNRESERVED,
148 JOURNAL_MAY_SKIP_FLUSH,
151 /* Embedded in struct bch_fs */
153 /* Fastpath stuff up front: */
157 union journal_res_state reservations;
159 /* Max size of current journal entry */
160 unsigned cur_entry_u64s;
161 unsigned cur_entry_sectors;
164 * 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
165 * insufficient devices:
170 cur_entry_max_in_flight,
171 cur_entry_journal_full,
172 cur_entry_journal_pin_full,
173 cur_entry_journal_stuck,
174 cur_entry_insufficient_devices,
177 union journal_preres_state prereserved;
179 /* Reserved space in journal entry to be used just prior to write */
180 unsigned entry_u64s_reserved;
182 unsigned buf_size_want;
185 * Two journal entries -- one is currently open for new entries, the
186 * other is possibly being written out.
188 struct journal_buf buf[JOURNAL_BUF_NR];
192 /* if nonzero, we may not open a new journal entry: */
195 /* Used when waiting because the journal was full */
196 wait_queue_head_t wait;
197 struct closure_waitlist async_wait;
198 struct closure_waitlist preres_wait;
201 struct delayed_work write_work;
203 /* Sequence number of most recent journal entry (last entry in @pin) */
206 /* seq, last_seq from the most recent journal entry successfully written */
208 u64 flushed_seq_ondisk;
214 * FIFO of journal entries whose btree updates have not yet been
217 * Each entry is a reference count. The position in the FIFO is the
218 * entry's sequence number relative to @seq.
220 * The journal entry itself holds a reference count, put when the
221 * journal entry is written out. Each btree node modified by the journal
222 * entry also holds a reference count, put when the btree node is
225 * When a reference count reaches zero, the journal entry is no longer
226 * needed. When all journal entries in the oldest journal bucket are no
227 * longer needed, the bucket can be discarded and reused.
230 u64 front, back, size, mask;
231 struct journal_entry_pin_list *data;
234 struct journal_space space[journal_space_nr];
236 u64 replay_journal_seq;
237 u64 replay_journal_seq_end;
239 struct write_point wp;
242 struct mutex reclaim_lock;
243 wait_queue_head_t reclaim_wait;
244 struct task_struct *reclaim_thread;
246 unsigned long next_reclaim;
247 u64 nr_direct_reclaim;
248 u64 nr_background_reclaim;
250 unsigned long last_flushed;
251 struct journal_entry_pin *flush_in_progress;
252 bool flush_in_progress_dropped;
253 wait_queue_head_t pin_flush_wait;
255 /* protects advancing ja->discard_idx: */
256 struct mutex discard_lock;
259 unsigned long last_flush_write;
261 u64 res_get_blocked_start;
262 u64 write_start_time;
265 u64 nr_noflush_writes;
267 struct time_stats *flush_write_time;
268 struct time_stats *noflush_write_time;
269 struct time_stats *blocked_time;
270 struct time_stats *flush_seq_time;
272 #ifdef CONFIG_DEBUG_LOCK_ALLOC
273 struct lockdep_map res_map;
278 * Embedded in struct bch_dev. First three fields refer to the array of journal
279 * buckets, in bch_sb.
281 struct journal_device {
283 * For each journal bucket, contains the max sequence number of the
284 * journal writes it contains - so we know when a bucket can be reused.
288 unsigned sectors_free;
291 * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx:
293 unsigned discard_idx; /* Next bucket to discard */
294 unsigned dirty_idx_ondisk;
296 unsigned cur_idx; /* Journal bucket we're currently writing to */
301 /* Bio for journal reads/writes to this device */
304 /* for bch_journal_read_device */
309 * journal_entry_res - reserve space in every journal entry:
311 struct journal_entry_res {
315 #endif /* _BCACHEFS_JOURNAL_TYPES_H */