#include "bcachefs.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

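/*
 * Sketch of the intended usage (example_flush and my_pin are illustrative,
 * not code from this file): whoever makes an update covered by journal
 * entry @seq takes a pin on that entry; the flush callback must write out
 * whatever the pin protects and then release the pin, so last_seq can
 * advance past @seq:
 *
 *	static void example_flush(struct journal *j,
 *				  struct journal_entry_pin *pin, u64 seq)
 *	{
 *		write out the work the pin protects, then:
 *		bch2_journal_pin_drop(j, pin);
 *	}
 *
 *	bch2_journal_pin_add(j, seq, &my_pin, example_flush);
 */
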
static inline void __journal_pin_add(struct journal *j, u64 seq,
				     struct journal_entry_pin *pin,
				     journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	BUG_ON(journal_pin_active(pin));
	BUG_ON(!atomic_read(&pin_list->count));

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (flush_fn)
		list_add(&pin->list, &pin_list->list);
	else
		INIT_LIST_HEAD(&pin->list);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}

void bch2_journal_pin_add(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);
	__journal_pin_add(j, seq, pin, flush_fn);
	spin_unlock(&j->lock);
}

static inline void __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	if (atomic_dec_and_test(&pin_list->count) &&
	    pin_list == &fifo_peek_front(&j->pin))
		bch2_journal_reclaim_fast(j);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	__journal_pin_drop(j, pin);
	spin_unlock(&j->lock);
}

void bch2_journal_pin_add_if_older(struct journal *j,
				   struct journal_entry_pin *src_pin,
				   struct journal_entry_pin *pin,
				   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	if (journal_pin_active(src_pin) &&
	    (!journal_pin_active(pin) ||
	     src_pin->seq < pin->seq)) {
		__journal_pin_drop(j, pin);
		__journal_pin_add(j, src_pin->seq, pin, flush_fn);
	}

	spin_unlock(&j->lock);
}

void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

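/*
 * Sketch of the teardown pattern this supports (caller code is illustrative,
 * not from this file): before freeing an object embedding a pin, drop the
 * pin, then wait out any flush that reclaim may already be running on it:
 *
 *	bch2_journal_pin_drop(j, &obj->pin);
 *	bch2_journal_pin_flush(j, &obj->pin);
 *	kfree(obj);
 */
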
/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

/**
 * bch2_journal_reclaim_fast - do the fast part of journal reclaim
 *
 * Called from IO submission context, does not block. Cleans up after btree
 * write completions by advancing the journal pin and each device's last_idx,
 * kicking off discards and background reclaim as necessary.
 */
void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
		BUG_ON(!fifo_pop(&j->pin, temp));
		popped = true;
	}

	if (popped)
		journal_wake(j);
}

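/*
 * Worked illustration (hypothetical state, not code): if j->pin holds entries
 * for seqs 10..13 with counts 0, 0, 2, 1, the loop above pops seqs 10 and 11;
 * journal_last_seq(j) becomes 12, and the space those two entries pinned can
 * be reused. Seq 12 stays until its count also drops to zero.
 */
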
static void journal_pin_mark_flushing(struct journal *j,
				      struct journal_entry_pin *pin,
				      u64 seq)
{
	lockdep_assert_held(&j->reclaim_lock);

	list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
	BUG_ON(j->flush_in_progress);
	j->flush_in_progress = pin;
}

static void journal_pin_flush(struct journal *j,
			      struct journal_entry_pin *pin,
			      u64 seq)
{
	pin->flush(j, pin, seq);

	BUG_ON(j->flush_in_progress != pin);
	j->flush_in_progress = NULL;
	wake_up(&j->pin_flush_wait);
}

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	/* no need to iterate over empty fifo entries: */
	bch2_journal_reclaim_fast(j);

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
		if (*seq > seq_to_flush ||
		    (ret = list_first_entry_or_null(&pin_list->list,
				struct journal_entry_pin, list)))
			break;

	return ret;
}

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->nr &&
		(ja->last_idx != ja->cur_idx &&
		 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
	spin_unlock(&j->lock);

	return ret;
}

/**
 * bch2_journal_reclaim_work - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
void bch2_journal_reclaim_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
				struct bch_fs, journal.reclaim_work);
	struct journal *j = &c->journal;
	struct bch_dev *ca;
	struct journal_entry_pin *pin;
	u64 seq, seq_to_flush = 0;
	unsigned iter, bucket_to_flush;
	unsigned long next_flush;
	bool reclaim_lock_held = false, need_flush;

	/*
	 * Advance last_idx to point to the oldest journal entry containing
	 * btree node updates that have not yet been written out
	 */
	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (should_discard_bucket(j, ja)) {
			if (!reclaim_lock_held) {
				/*
				 * We might be called from __journal_res_get()
				 * under wait_event() - have to go back to
				 * TASK_RUNNING before doing something that
				 * would block, but only if we're doing work:
				 */
				__set_current_state(TASK_RUNNING);

				mutex_lock(&j->reclaim_lock);
				reclaim_lock_held = true;
				/* recheck under reclaim_lock: */
				continue;
			}

			if (ca->mi.discard &&
			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->last_idx]),
					ca->mi.bucket_size, GFP_NOIO, 0);

			spin_lock(&j->lock);
			ja->last_idx = (ja->last_idx + 1) % ja->nr;
			spin_unlock(&j->lock);

			journal_wake(j);
		}

		/*
		 * Write out enough btree nodes to free up 50% of the journal
		 * buckets:
		 */
		spin_lock(&j->lock);
		bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
		seq_to_flush = max_t(u64, seq_to_flush,
				     ja->bucket_seq[bucket_to_flush]);
		spin_unlock(&j->lock);
	}

	/* Also flush if the pin fifo is more than half full */
	spin_lock(&j->lock);
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));

	/*
	 * If it's been longer than j->reclaim_delay_ms since we last flushed,
	 * make sure to flush at least one journal pin:
	 */
	next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
	need_flush = time_after(jiffies, next_flush);

	while ((pin = journal_get_next_pin(j, need_flush
					   ? U64_MAX
					   : seq_to_flush, &seq))) {
		if (!reclaim_lock_held) {
			spin_unlock(&j->lock);
			__set_current_state(TASK_RUNNING);
			mutex_lock(&j->reclaim_lock);
			reclaim_lock_held = true;
			spin_lock(&j->lock);
			continue;
		}

		journal_pin_mark_flushing(j, pin, seq);
		spin_unlock(&j->lock);

		journal_pin_flush(j, pin, seq);

		need_flush = false;
		j->last_flushed = jiffies;

		spin_lock(&j->lock);
	}

	spin_unlock(&j->lock);

	if (reclaim_lock_held)
		mutex_unlock(&j->reclaim_lock);

	if (!test_bit(BCH_FS_RO, &c->flags))
		queue_delayed_work(system_freezable_wq, &j->reclaim_work,
				   msecs_to_jiffies(j->reclaim_delay_ms));
}

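/*
 * Worked example for the 50% target above (numbers illustrative): with
 * ja->nr == 8 and ja->cur_idx == 2, bucket_to_flush == (2 + 4) % 8 == 6, so
 * we flush pins up to ja->bucket_seq[6]; once last_seq advances past that
 * sequence number, last_idx can walk forward through half the ring and those
 * buckets become reusable (and discardable, per should_discard_bucket()).
 */
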
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      struct journal_entry_pin **pin,
			      u64 *pin_seq)
{
	int ret;

	*pin = NULL;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = (*pin = journal_get_next_pin(j, seq_to_flush, pin_seq)) != NULL ||
		!test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		(fifo_used(&j->pin) == 1 &&
		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
	if (*pin)
		journal_pin_mark_flushing(j, *pin, *pin_seq);

	spin_unlock(&j->lock);

	return ret;
}

void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	struct journal_entry_pin *pin;
	u64 pin_seq;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return;

	mutex_lock(&j->reclaim_lock);

	while (1) {
		wait_event(j->wait, journal_flush_done(j, seq_to_flush,
						       &pin, &pin_seq));
		if (!pin)
			break;

		journal_pin_flush(j, pin, pin_seq);
	}

	mutex_unlock(&j->reclaim_lock);
}

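/*
 * Callers that need everything flushed - e.g. a clean shutdown - can pass
 * U64_MAX as seq_to_flush; a sketch of that usage:
 *
 *	bch2_journal_flush_pins(j, U64_MAX);
 */
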
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct bch_devs_list devs;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret && seq < j->pin.back) {
		seq = max(seq, journal_last_seq(j));
		devs = journal_seq_pin(j, seq)->devs;
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

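/*
 * Sketch of the intended use (illustrative, not from this file): device
 * removal flushes all journal pins referencing the device and then rewrites
 * the replicas tracking so no journal entry claims to live on it, while
 * dev_idx < 0 flushes entries that are under-replicated:
 *
 *	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
 */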