#include "bcachefs.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */
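/*
 * Illustrative usage sketch (hypothetical caller, not part of this file): a
 * btree node write would pin the sequence number of the journal entry its
 * updates went into, then drop the pin from its write completion once the
 * node itself is on disk:
 *
 *	bch2_journal_pin_add(j, seq, &w->journal, btree_node_flush_fn);
 *	...
 *	bch2_journal_pin_drop(j, &w->journal);
 *
 * Here w->journal and btree_node_flush_fn are assumed names for the caller's
 * embedded struct journal_entry_pin and its flush callback.
 */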
static inline void __journal_pin_add(struct journal *j,
				     u64 seq,
				     struct journal_entry_pin *pin,
				     journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	BUG_ON(journal_pin_active(pin));
	BUG_ON(!atomic_read(&pin_list->count));

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	/* pins without a flush callback don't go on the flush list: */
	if (flush_fn)
		list_add(&pin->list, &pin_list->list);
	else
		INIT_LIST_HEAD(&pin->list);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}
void bch2_journal_pin_add(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);
	__journal_pin_add(j, seq, pin, flush_fn);
	spin_unlock(&j->lock);
}
static inline void __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	if (atomic_dec_and_test(&pin_list->count) &&
	    pin_list == &fifo_peek_front(&j->pin))
		bch2_journal_reclaim_fast(j);
}
void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	__journal_pin_drop(j, pin);
	spin_unlock(&j->lock);
}
void bch2_journal_pin_update(struct journal *j, u64 seq,
			     struct journal_entry_pin *pin,
			     journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	if (pin->seq != seq) {
		__journal_pin_drop(j, pin);
		__journal_pin_add(j, seq, pin, flush_fn);
	} else {
		struct journal_entry_pin_list *pin_list =
			journal_seq_pin(j, seq);

		list_move(&pin->list, &pin_list->list);
	}

	spin_unlock(&j->lock);
}
void bch2_journal_pin_add_if_older(struct journal *j,
				   struct journal_entry_pin *src_pin,
				   struct journal_entry_pin *pin,
				   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	if (journal_pin_active(src_pin) &&
	    (!journal_pin_active(pin) ||
	     src_pin->seq < pin->seq)) {
		__journal_pin_drop(j, pin);
		__journal_pin_add(j, src_pin->seq, pin, flush_fn);
	}

	spin_unlock(&j->lock);
}
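/*
 * Sketch of the intended use of bch2_journal_pin_add_if_older (hypothetical
 * caller): when a new btree node write supersedes an older one, the new write
 * must not let the journal's last_seq advance past whatever the old write was
 * pinning, so it inherits the older pin:
 *
 *	bch2_journal_pin_add_if_older(j, &old_write->journal,
 *				      &new_write->journal, flush_fn);
 *
 * old_write, new_write and flush_fn are assumed names, not identifiers from
 * this file.
 */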
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */
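/*
 * A rough sketch of the three callers described above (assumed call sites,
 * shown for orientation only):
 *
 *	// background reclaim, scheduled via j->reclaim_work:
 *	queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
 *
 *	// clean shutdown: flush every pin, i.e. everything up to U64_MAX:
 *	bch2_journal_flush_pins(j, U64_MAX);
 *
 *	// device migration/removal: flush pins referencing one device:
 *	bch2_journal_flush_device_pins(j, ca->dev_idx);
 */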
/**
 * bch2_journal_reclaim_fast - do the fast part of journal reclaim
 *
 * Called from IO submission context, does not block. Cleans up after btree
 * write completions by advancing the journal pin and each device's last_idx,
 * kicking off discards and background reclaim as necessary.
 */
void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
		BUG_ON(!fifo_pop(&j->pin, temp));
		popped = true;
	}

	if (popped)
		journal_wake(j);
}
static void journal_pin_mark_flushing(struct journal *j,
				      struct journal_entry_pin *pin,
				      u64 seq)
{
	lockdep_assert_held(&j->reclaim_lock);

	list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
	BUG_ON(j->flush_in_progress);
	j->flush_in_progress = pin;
}
static void journal_pin_flush(struct journal *j,
			      struct journal_entry_pin *pin,
			      u64 seq)
{
	pin->flush(j, pin, seq);

	BUG_ON(j->flush_in_progress != pin);
	j->flush_in_progress = NULL;
	wake_up(&j->pin_flush_wait);
}
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	/* no need to iterate over empty fifo entries: */
	bch2_journal_reclaim_fast(j);

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
		if (*seq > seq_to_flush ||
		    (ret = list_first_entry_or_null(&pin_list->list,
				struct journal_entry_pin, list)))
			break;

	return ret;
}
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->nr &&
		(ja->last_idx != ja->cur_idx &&
		 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
	spin_unlock(&j->lock);

	return ret;
}
/**
 * bch2_journal_reclaim_work - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
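/*
 * Worked example (illustrative numbers, not taken from this code): with a pin
 * FIFO of size 4096 and 1024 journal buckets, background reclaim would kick
 * in once fewer than 512 FIFO entries or fewer than 256 buckets remain free,
 * and would keep flushing until 1024 FIFO entries and 512 buckets are free.
 */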
void bch2_journal_reclaim_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
				struct bch_fs, journal.reclaim_work);
	struct journal *j = &c->journal;
	struct bch_dev *ca;
	struct journal_entry_pin *pin;
	u64 seq, seq_to_flush = 0;
	unsigned iter, bucket_to_flush;
	unsigned long next_flush;
	bool reclaim_lock_held = false, need_flush;
	/*
	 * Advance last_idx to point to the oldest journal entry containing
	 * btree node updates that have not yet been written out
	 */
	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (should_discard_bucket(j, ja)) {
			if (!reclaim_lock_held) {
				/*
				 * ugh:
				 * might be called from __journal_res_get()
				 * under wait_event() - have to go back to
				 * TASK_RUNNING before doing something that
				 * would block, but only if we're doing work:
				 */
				__set_current_state(TASK_RUNNING);

				mutex_lock(&j->reclaim_lock);
				reclaim_lock_held = true;
				/* recheck under reclaim_lock: */
				continue;
			}

			if (ca->mi.discard &&
			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->last_idx]),
					ca->mi.bucket_size, GFP_NOIO, 0);

			spin_lock(&j->lock);
			ja->last_idx = (ja->last_idx + 1) % ja->nr;
			spin_unlock(&j->lock);

			journal_wake(j);
		}
		/*
		 * Write out enough btree nodes to free up 50% of the journal
		 * buckets
		 */
		spin_lock(&j->lock);
		bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
		seq_to_flush = max_t(u64, seq_to_flush,
				     ja->bucket_seq[bucket_to_flush]);
		spin_unlock(&j->lock);
	}
	/* Also flush if the pin fifo is more than half full */
	spin_lock(&j->lock);
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));

	/*
	 * If it's been longer than j->reclaim_delay_ms since we last flushed,
	 * make sure to flush at least one journal pin:
	 */
	next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
	need_flush = time_after(jiffies, next_flush);

	while ((pin = journal_get_next_pin(j, need_flush
					   ? U64_MAX
					   : seq_to_flush, &seq))) {
		if (!reclaim_lock_held) {
			spin_unlock(&j->lock);
			__set_current_state(TASK_RUNNING);
			mutex_lock(&j->reclaim_lock);
			reclaim_lock_held = true;
			spin_lock(&j->lock);
			continue;
		}

		journal_pin_mark_flushing(j, pin, seq);
		spin_unlock(&j->lock);

		journal_pin_flush(j, pin, seq);

		need_flush = false;
		j->last_flushed = jiffies;

		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	if (reclaim_lock_held)
		mutex_unlock(&j->reclaim_lock);

	if (!test_bit(BCH_FS_RO, &c->flags))
		queue_delayed_work(system_freezable_wq, &j->reclaim_work,
				   msecs_to_jiffies(j->reclaim_delay_ms));
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      struct journal_entry_pin **pin,
			      u64 *pin_seq)
{
	int ret;

	*pin = NULL;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = (*pin = journal_get_next_pin(j, seq_to_flush, pin_seq)) != NULL ||
		!test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		(fifo_used(&j->pin) == 1 &&
		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
	if (*pin)
		journal_pin_mark_flushing(j, *pin, *pin_seq);

	spin_unlock(&j->lock);

	return ret;
}
void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	struct journal_entry_pin *pin;
	u64 pin_seq;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return;

	mutex_lock(&j->reclaim_lock);

	while (1) {
		wait_event(j->wait, journal_flush_done(j, seq_to_flush,
						       &pin, &pin_seq));
		if (!pin)
			break;

		journal_pin_flush(j, pin, pin_seq);
	}

	mutex_unlock(&j->reclaim_lock);
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct bch_devs_list devs;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret && seq < j->pin.back) {
		seq = max(seq, journal_last_seq(j));
		devs = journal_seq_pin(j, seq)->devs;
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
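/*
 * Illustrative caller of the above (assumed, for clarity): a device removal
 * path would flush journal pins referencing the device before it can be
 * detached:
 *
 *	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
 *	if (ret)
 *		return ret;
 */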