// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

/* Persistent alloc info: */

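/*
 * v1 alloc keys pack their fields as fixed-size little-endian integers;
 * this table, generated from the BCH_ALLOC_FIELDS_V1() x-macro, records the
 * byte width of each field (bits / 8) so the unpack code can walk the value.
 */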
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
        BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
        u64             journal_seq;
        u8              gen;
        u8              oldest_gen;
        u8              data_type;
        bool            need_discard:1;
        bool            need_inc_gen:1;
#define x(_name, _bits) u##_bits _name;
        BCH_ALLOC_FIELDS_V2()
#undef  x
};

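/*
 * Read a single v1 field: if the field's bit isn't set in a->fields the
 * field was never written and reads back as 0; otherwise it's decoded at *p
 * with the width given by BCH_ALLOC_V1_FIELD_BYTES, and *p is advanced past
 * it.
 */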
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
                                     const void **p, unsigned field)
{
        unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
        u64 v;

        if (!(a->fields & (1 << field)))
                return 0;

        switch (bytes) {
        case 1:
                v = *((const u8 *) *p);
                break;
        case 2:
                v = le16_to_cpup(*p);
                break;
        case 4:
                v = le32_to_cpup(*p);
                break;
        case 8:
                v = le64_to_cpup(*p);
                break;
        default:
                BUG();
        }

        *p += bytes;
        return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
                                 struct bkey_s_c k)
{
        const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
        const void *d = in->data;
        unsigned idx = 0;

        out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
        BCH_ALLOC_FIELDS_V1()
#undef  x
}

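/*
 * v2 and v3 alloc keys store their fields as varints. Fields past
 * a.v->nr_fields were added after this key was written and default to 0;
 * the v != out->_name check below catches a decoded value that doesn't fit
 * in the (possibly narrower) unpacked field.
 */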
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef  x
        return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;
        out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
        out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
        out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef  x
        return 0;
}

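/*
 * Unpack any of the older on-disk alloc key versions into the common
 * in-memory representation; unrecognized key types yield a zeroed result.
 */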
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
        struct bkey_alloc_unpacked ret = { .gen = 0 };

        switch (k.k->type) {
        case KEY_TYPE_alloc:
                bch2_alloc_unpack_v1(&ret, k);
                break;
        case KEY_TYPE_alloc_v2:
                bch2_alloc_unpack_v2(&ret, k);
                break;
        case KEY_TYPE_alloc_v3:
                bch2_alloc_unpack_v3(&ret, k);
                break;
        }

        return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
        unsigned i, bytes = offsetof(struct bch_alloc, data);

        for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
                if (a->fields & (1 << i))
                        bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

        return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
{
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
        int ret = 0;

        /* allow for unknown fields */
        bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
                         alloc_v1_val_size_bad,
                         "incorrect value size (%zu < %u)",
                         bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
        return ret;
}

int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
{
        struct bkey_alloc_unpacked u;
        int ret = 0;

        bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
                         alloc_v2_unpack_error,
                         "unpack error");
fsck_err:
        return ret;
}

int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
{
        struct bkey_alloc_unpacked u;
        int ret = 0;

        bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
                         alloc_v2_unpack_error,
                         "unpack error");
fsck_err:
        return ret;
}

int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags, struct printbuf *err)
{
        struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
        int ret = 0;

        bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
                         alloc_v4_val_size_bad,
                         "bad val size (%u > %zu)",
                         alloc_v4_u64s(a.v), bkey_val_u64s(k.k));

        bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
                         BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
                         alloc_v4_backpointers_start_bad,
                         "invalid backpointers_start");

        bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
                         alloc_key_data_type_bad,
                         "invalid data type (got %u should be %u)",
                         a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

        switch (a.v->data_type) {
        case BCH_DATA_free:
        case BCH_DATA_need_gc_gens:
        case BCH_DATA_need_discard:
                bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
                                 c, err, alloc_key_empty_but_have_data,
                                 "empty data type free but have data");
                break;
        case BCH_DATA_sb:
        case BCH_DATA_journal:
        case BCH_DATA_btree:
        case BCH_DATA_user:
        case BCH_DATA_parity:
                bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
                                 c, err, alloc_key_dirty_sectors_0,
                                 "data_type %s but dirty_sectors==0",
                                 bch2_data_type_str(a.v->data_type));
                break;
        case BCH_DATA_cached:
                bkey_fsck_err_on(!a.v->cached_sectors ||
                                 bch2_bucket_sectors_dirty(*a.v) ||
                                 a.v->stripe,
                                 c, err, alloc_key_cached_inconsistency,
                                 "data type inconsistency");

                bkey_fsck_err_on(!a.v->io_time[READ] &&
                                 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
                                 c, err, alloc_key_cached_but_read_time_zero,
                                 "cached bucket with read_time == 0");
                break;
        case BCH_DATA_stripe:
                break;
        }
fsck_err:
        return ret;
}

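/*
 * Byte-swap an alloc_v4 key, including any backpointers stored past
 * BCH_ALLOC_V4_BACKPOINTERS_START(); used when reading metadata written
 * with the opposite byte order.
 */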
void bch2_alloc_v4_swab(struct bkey_s k)
{
        struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
        struct bch_backpointer *bp, *bps;

        a->journal_seq          = swab64(a->journal_seq);
        a->flags                = swab32(a->flags);
        a->dirty_sectors        = swab32(a->dirty_sectors);
        a->cached_sectors       = swab32(a->cached_sectors);
        a->io_time[0]           = swab64(a->io_time[0]);
        a->io_time[1]           = swab64(a->io_time[1]);
        a->stripe               = swab32(a->stripe);
        a->nr_external_backpointers = swab32(a->nr_external_backpointers);
        a->fragmentation_lru    = swab64(a->fragmentation_lru);

        bps = alloc_v4_backpointers(a);
        for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
                bp->bucket_offset       = swab40(bp->bucket_offset);
                bp->bucket_len          = swab32(bp->bucket_len);
                bch2_bpos_swab(&bp->pos);
        }
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_alloc_v4 _a;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

        prt_newline(out);
        printbuf_indent_add(out, 2);

        prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
        bch2_prt_data_type(out, a->data_type);
        prt_newline(out);
        prt_printf(out, "journal_seq       %llu",       a->journal_seq);
        prt_newline(out);
        prt_printf(out, "need_discard      %llu",       BCH_ALLOC_V4_NEED_DISCARD(a));
        prt_newline(out);
        prt_printf(out, "need_inc_gen      %llu",       BCH_ALLOC_V4_NEED_INC_GEN(a));
        prt_newline(out);
        prt_printf(out, "dirty_sectors     %u", a->dirty_sectors);
        prt_newline(out);
        prt_printf(out, "cached_sectors    %u", a->cached_sectors);
        prt_newline(out);
        prt_printf(out, "stripe            %u", a->stripe);
        prt_newline(out);
        prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
        prt_newline(out);
        prt_printf(out, "io_time[READ]     %llu",       a->io_time[READ]);
        prt_newline(out);
        prt_printf(out, "io_time[WRITE]    %llu",       a->io_time[WRITE]);
        prt_newline(out);
        prt_printf(out, "fragmentation     %llu",       a->fragmentation_lru);
        prt_newline(out);
        prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
        printbuf_indent_sub(out, 2);
}

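/*
 * Convert any alloc key version to the current v4 layout: v4 keys are
 * copied with the (possibly stale) backpointers region cleared, older
 * versions are translated field by field via bch2_alloc_unpack().
 */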
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
        if (k.k->type == KEY_TYPE_alloc_v4) {
                void *src, *dst;

                *out = *bkey_s_c_to_alloc_v4(k).v;

                src = alloc_v4_backpointers(out);
                SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
                dst = alloc_v4_backpointers(out);

                if (src < dst)
                        memset(src, 0, dst - src);

                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
        } else {
                struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

                *out = (struct bch_alloc_v4) {
                        .journal_seq            = u.journal_seq,
                        .flags                  = u.need_discard,
                        .gen                    = u.gen,
                        .oldest_gen             = u.oldest_gen,
                        .data_type              = u.data_type,
                        .stripe_redundancy      = u.stripe_redundancy,
                        .dirty_sectors          = u.dirty_sectors,
                        .cached_sectors         = u.cached_sectors,
                        .io_time[READ]          = u.read_time,
                        .io_time[WRITE]         = u.write_time,
                        .stripe                 = u.stripe,
                };

                SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
        }
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
        struct bkey_i_alloc_v4 *ret;

        ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
        if (IS_ERR(ret))
                return ret;

        if (k.k->type == KEY_TYPE_alloc_v4) {
                void *src, *dst;

                bkey_reassemble(&ret->k_i, k);

                src = alloc_v4_backpointers(&ret->v);
                SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
                dst = alloc_v4_backpointers(&ret->v);

                if (src < dst)
                        memset(src, 0, dst - src);

                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
                set_alloc_v4_u64s(ret);
        } else {
                bkey_alloc_v4_init(&ret->k_i);
                ret->k.p = k.k->p;
                bch2_alloc_to_v4(k, &ret->v);
        }
        return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v4 a;

        if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
            ((a = bkey_s_c_to_alloc_v4(k), true) &&
             BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
                return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

        return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
        return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
                              struct bpos pos)
{
        struct bkey_s_c k;
        struct bkey_i_alloc_v4 *a;
        int ret;

        k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
                             BTREE_ITER_WITH_UPDATES|
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        ret = bkey_err(k);
        if (unlikely(ret))
                return ERR_PTR(ret);

        a = bch2_alloc_to_v4_mut_inlined(trans, k);
        ret = PTR_ERR_OR_ZERO(a);
        if (unlikely(ret))
                goto err;
        return a;
err:
        bch2_trans_iter_exit(trans, iter);
        return ERR_PTR(ret);
}

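/*
 * A single bucket_gens key covers KEY_TYPE_BUCKET_GENS_NR consecutive
 * buckets: the low KEY_TYPE_BUCKET_GENS_BITS of the bucket offset index
 * into gens[], the remaining bits select the bucket_gens key. E.g. with
 * 256 gens per key, bucket 0:1000 maps to bucket_gens pos 0:3, offset 232.
 */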
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
        *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

        pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
        return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
        pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
        pos.offset += offset;
        return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
        return k.k->type == KEY_TYPE_bucket_gens
                ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
                : 0;
}

int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
{
        int ret = 0;

        bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
                         bucket_gens_val_size_bad,
                         "bad val size (%zu != %zu)",
                         bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
        return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
                if (i)
                        prt_char(out, ' ');
                prt_printf(out, "%u", g.v->gens[i]);
        }
}

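/*
 * Build the bucket_gens btree from the alloc btree: gens are accumulated
 * into one bucket_gens key at a time, and each key is committed when the
 * iterator advances past the range it covers.
 */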
int bch2_bucket_gens_init(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bkey_i_bucket_gens g;
        bool have_bucket_gens_key = false;
        int ret;

        ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                                 BTREE_ITER_PREFETCH, k, ({
                /*
                 * Not a fsck error because this is checked/repaired by
                 * bch2_check_alloc_key() which runs later:
                 */
                if (!bch2_dev_bucket_exists(c, k.k->p))
                        continue;

                struct bch_alloc_v4 a;
                u8 gen = bch2_alloc_to_v4(k, &a)->gen;
                unsigned offset;
                struct bpos pos = alloc_gens_pos(iter.pos, &offset);

                if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
                        ret = commit_do(trans, NULL, NULL,
                                        BCH_TRANS_COMMIT_no_enospc,
                                bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
                        if (ret)
                                break;
                        have_bucket_gens_key = false;
                }

                if (!have_bucket_gens_key) {
                        bkey_bucket_gens_init(&g.k_i);
                        g.k.p = pos;
                        have_bucket_gens_key = true;
                }

                g.v.gens[offset] = gen;
                0;
        }));

        if (have_bucket_gens_key && !ret)
                ret = commit_do(trans, NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc,
                        bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

        bch2_trans_put(trans);

        bch_err_fn(c, ret);
        return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        int ret;

        down_read(&c->gc_lock);

        if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
                ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
                                         BTREE_ITER_PREFETCH, k, ({
                        u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
                        u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

                        if (k.k->type != KEY_TYPE_bucket_gens)
                                continue;

                        const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

                        /*
                         * Not a fsck error because this is checked/repaired by
                         * bch2_check_alloc_key() which runs later:
                         */
                        if (!bch2_dev_exists2(c, k.k->p.inode))
                                continue;

                        struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

                        for (u64 b = max_t(u64, ca->mi.first_bucket, start);
                             b < min_t(u64, ca->mi.nbuckets, end);
                             b++)
                                *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
                        0;
                }));
        } else {
                ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                                         BTREE_ITER_PREFETCH, k, ({
                        /*
                         * Not a fsck error because this is checked/repaired by
                         * bch2_check_alloc_key() which runs later:
                         */
                        if (!bch2_dev_bucket_exists(c, k.k->p))
                                continue;

                        struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

                        struct bch_alloc_v4 a;
                        *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
                        0;
                }));
        }

        bch2_trans_put(trans);
        up_read(&c->gc_lock);

        bch_err_fn(c, ret);
        return ret;
}

/* Free space/discard btree: */

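/*
 * Keep the need_discard and freespace btrees in sync with the alloc btree:
 * a bucket in state BCH_DATA_need_discard has a KEY_TYPE_set key at its own
 * position in the need_discard btree, and a BCH_DATA_free bucket has one at
 * its freespace position, which also encodes the generation's genbits.
 */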
static int bch2_bucket_do_index(struct btree_trans *trans,
                                struct bkey_s_c alloc_k,
                                const struct bch_alloc_v4 *a,
                                bool set)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
        struct btree_iter iter;
        struct bkey_s_c old;
        struct bkey_i *k;
        enum btree_id btree;
        enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
        enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (a->data_type != BCH_DATA_free &&
            a->data_type != BCH_DATA_need_discard)
                return 0;

        k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
        if (IS_ERR(k))
                return PTR_ERR(k);

        bkey_init(&k->k);
        k->k.type = new_type;

        switch (a->data_type) {
        case BCH_DATA_free:
                btree = BTREE_ID_freespace;
                k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
                bch2_key_resize(&k->k, 1);
                break;
        case BCH_DATA_need_discard:
                btree = BTREE_ID_need_discard;
                k->k.p = alloc_k.k->p;
                break;
        default:
                return 0;
        }

        old = bch2_bkey_get_iter(trans, &iter, btree,
                             bkey_start_pos(&k->k),
                             BTREE_ITER_INTENT);
        ret = bkey_err(old);
        if (ret)
                return ret;

        if (ca->mi.freespace_initialized &&
            c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
            bch2_trans_inconsistent_on(old.k->type != old_type, trans,
                        "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
                        "  for %s",
                        set ? "setting" : "clearing",
                        bch2_btree_id_str(btree),
                        iter.pos.inode,
                        iter.pos.offset,
                        bch2_bkey_types[old.k->type],
                        bch2_bkey_types[old_type],
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = -EIO;
                goto err;
        }

        ret = bch2_trans_update(trans, &iter, k, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
                                           struct bpos bucket, u8 gen)
{
        struct btree_iter iter;
        unsigned offset;
        struct bpos pos = alloc_gens_pos(bucket, &offset);
        struct bkey_i_bucket_gens *g;
        struct bkey_s_c k;
        int ret;

        g = bch2_trans_kmalloc(trans, sizeof(*g));
        ret = PTR_ERR_OR_ZERO(g);
        if (ret)
                return ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
                               BTREE_ITER_INTENT|
                               BTREE_ITER_WITH_UPDATES);
        ret = bkey_err(k);
        if (ret)
                return ret;

        if (k.k->type != KEY_TYPE_bucket_gens) {
                bkey_bucket_gens_init(&g->k_i);
                g->k.p = iter.pos;
        } else {
                bkey_reassemble(&g->k_i, k);
        }

        g->v.gens[offset] = gen;

        ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

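/*
 * The alloc trigger runs in two phases: transactionally it recomputes
 * derived fields and updates the need_discard/freespace indexes, the LRUs
 * and the bucket_gens key; atomically (at commit time) it updates in-memory
 * bucket gens and device usage, and kicks off background work (discards,
 * invalidates, gc of gens) when a bucket changes state.
 */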
int bch2_trigger_alloc(struct btree_trans *trans,
                       enum btree_id btree, unsigned level,
                       struct bkey_s_c old, struct bkey_s new,
                       unsigned flags)
{
        struct bch_fs *c = trans->c;
        int ret = 0;

        if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
                                       "alloc key for invalid device or bucket"))
                return -EIO;

        struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);

        struct bch_alloc_v4 old_a_convert;
        const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

                new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

                if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
                        new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
                        new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
                        SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
                        SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
                }

                if (data_type_is_empty(new_a->data_type) &&
                    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
                    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
                        new_a->gen++;
                        SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
                }

                if (old_a->data_type != new_a->data_type ||
                    (new_a->data_type == BCH_DATA_free &&
                     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
                        ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
                                bch2_bucket_do_index(trans, new.s_c, new_a, true);
                        if (ret)
                                return ret;
                }

                if (new_a->data_type == BCH_DATA_cached &&
                    !new_a->io_time[READ])
                        new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

                u64 old_lru = alloc_lru_idx_read(*old_a);
                u64 new_lru = alloc_lru_idx_read(*new_a);
                if (old_lru != new_lru) {
                        ret = bch2_lru_change(trans, new.k->p.inode,
                                              bucket_to_u64(new.k->p),
                                              old_lru, new_lru);
                        if (ret)
                                return ret;
                }

                new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
                                                bch_dev_bkey_exists(c, new.k->p.inode));
                if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
                        ret = bch2_lru_change(trans,
                                        BCH_LRU_FRAGMENTATION_START,
                                        bucket_to_u64(new.k->p),
                                        old_a->fragmentation_lru, new_a->fragmentation_lru);
                        if (ret)
                                return ret;
                }

                if (old_a->gen != new_a->gen) {
                        ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
                        if (ret)
                                return ret;
                }

                /*
                 * need to know if we're getting called from the invalidate path or
                 * not:
                 */

                if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
                    old_a->cached_sectors) {
                        ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
                                                              -((s64) old_a->cached_sectors));
                        if (ret)
                                return ret;
                }
        }

        if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
                struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
                u64 journal_seq = trans->journal_res.seq;
                u64 bucket_journal_seq = new_a->journal_seq;

                if ((flags & BTREE_TRIGGER_INSERT) &&
                    data_type_is_empty(old_a->data_type) !=
                    data_type_is_empty(new_a->data_type) &&
                    new.k->type == KEY_TYPE_alloc_v4) {
                        struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

                        /*
                         * If the btree updates referring to a bucket weren't flushed
                         * before the bucket became empty again, then we don't have
                         * to wait on a journal flush before we can reuse the bucket:
                         */
                        v->journal_seq = bucket_journal_seq =
                                data_type_is_empty(new_a->data_type) &&
                                (journal_seq == v->journal_seq ||
                                 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
                                ? 0 : journal_seq;
                }

                if (!data_type_is_empty(old_a->data_type) &&
                    data_type_is_empty(new_a->data_type) &&
                    bucket_journal_seq) {
                        ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                                        c->journal.flushed_seq_ondisk,
                                        new.k->p.inode, new.k->p.offset,
                                        bucket_journal_seq);
                        if (ret) {
                                bch2_fs_fatal_error(c,
                                        "error setting bucket_needs_journal_commit: %i", ret);
                                return ret;
                        }
                }

                percpu_down_read(&c->mark_lock);
                if (new_a->gen != old_a->gen)
                        *bucket_gen(ca, new.k->p.offset) = new_a->gen;

                bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
                percpu_up_read(&c->mark_lock);

#define eval_state(_a, expr)            ({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)               !eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)               (!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

                if (statechange(a->data_type == BCH_DATA_free &&
                                bucket_flushed(a)))
                        closure_wake_up(&c->freelist_wait);

                if (statechange(a->data_type == BCH_DATA_need_discard &&
                                bucket_flushed(a)) &&
                    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset))
                        bch2_do_discards(c);

                if (statechange(a->data_type == BCH_DATA_cached) &&
                    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
                    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
                        bch2_do_invalidates(c);

                if (statechange(a->data_type == BCH_DATA_need_gc_gens))
                        bch2_do_gc_gens(c);
        }

        if ((flags & BTREE_TRIGGER_GC) &&
            (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
                struct bch_alloc_v4 new_a_convert;
                const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

                percpu_down_read(&c->mark_lock);
                struct bucket *g = gc_bucket(ca, new.k->p.offset);

                bucket_lock(g);

                g->gen_valid            = 1;
                g->gen                  = new_a->gen;
                g->data_type            = new_a->data_type;
                g->stripe               = new_a->stripe;
                g->stripe_redundancy    = new_a->stripe_redundancy;
                g->dirty_sectors        = new_a->dirty_sectors;
                g->cached_sectors       = new_a->cached_sectors;

                bucket_unlock(g);
                percpu_up_read(&c->mark_lock);
        }

        return 0;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
        struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

        if (bkey_err(k))
                return k;

        if (k.k->type) {
                return k;
        } else {
                struct btree_iter iter2;
                struct bpos next;

                bch2_trans_copy_iter(&iter2, iter);

                struct btree_path *path = btree_iter_path(iter->trans, iter);
                if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
                        end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

                end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

                /*
                 * btree node min/max is a closed interval, upto takes a half
                 * open interval:
                 */
                k = bch2_btree_iter_peek_upto(&iter2, end);
                next = iter2.pos;
                bch2_trans_iter_exit(iter->trans, &iter2);

                BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

                if (bkey_err(k))
                        return k;

                bkey_init(hole);
                hole->p = iter->pos;

                bch2_key_resize(hole, next.offset - iter->pos.offset);
                return (struct bkey_s_c) { hole, NULL };
        }
}

static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
        struct bch_dev *ca;

        if (bch2_dev_bucket_exists(c, *bucket))
                return true;

        if (bch2_dev_exists2(c, bucket->inode)) {
                ca = bch_dev_bkey_exists(c, bucket->inode);

                if (bucket->offset < ca->mi.first_bucket) {
                        bucket->offset = ca->mi.first_bucket;
                        return true;
                }

                bucket->inode++;
                bucket->offset = 0;
        }

        rcu_read_lock();
        ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
        if (ca)
                *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
        rcu_read_unlock();

        return ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
        struct bch_fs *c = iter->trans->c;
        struct bkey_s_c k;
again:
        k = bch2_get_key_or_hole(iter, POS_MAX, hole);
        if (bkey_err(k))
                return k;

        if (!k.k->type) {
                struct bpos bucket = bkey_start_pos(k.k);

                if (!bch2_dev_bucket_exists(c, bucket)) {
                        if (!next_bucket(c, &bucket))
                                return bkey_s_c_null;

                        bch2_btree_iter_set_pos(iter, bucket);
                        goto again;
                }

                if (!bch2_dev_bucket_exists(c, k.k->p)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

                        bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
                }
        }

        return k;
}

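/*
 * Cross-check one alloc key against the need_discard, freespace and
 * bucket_gens btrees, rewriting any derived entry that disagrees with it.
 */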
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
                         struct bkey_s_c alloc_k,
                         struct btree_iter *alloc_iter,
                         struct btree_iter *discard_iter,
                         struct btree_iter *freespace_iter,
                         struct btree_iter *bucket_gens_iter)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        unsigned discard_key_type, freespace_key_type;
        unsigned gens_offset;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
                        alloc_key_to_missing_dev_bucket,
                        "alloc key for invalid device:bucket %llu:%llu",
                        alloc_k.k->p.inode, alloc_k.k->p.offset))
                return bch2_btree_delete_at(trans, alloc_iter, 0);

        ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
        if (!ca->mi.freespace_initialized)
                return 0;

        a = bch2_alloc_to_v4(alloc_k, &a_convert);

        discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
        bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
        k = bch2_btree_iter_peek_slot(discard_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != discard_key_type &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, need_discard_key_wrong,
                      "incorrect key in need_discard btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[discard_key_type],
                      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type  = discard_key_type;
                update->k.p     = discard_iter->pos;

                ret = bch2_trans_update(trans, discard_iter, update, 0);
                if (ret)
                        goto err;
        }

        freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
        bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
        k = bch2_btree_iter_peek_slot(freespace_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != freespace_key_type &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, freespace_key_wrong,
                      "incorrect key in freespace btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[freespace_key_type],
                      (printbuf_reset(&buf),
                       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type  = freespace_key_type;
                update->k.p     = freespace_iter->pos;
                bch2_key_resize(&update->k, 1);

                ret = bch2_trans_update(trans, freespace_iter, update, 0);
                if (ret)
                        goto err;
        }

        bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
        k = bch2_btree_iter_peek_slot(bucket_gens_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (a->gen != alloc_gen(k, gens_offset) &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, bucket_gens_key_wrong,
                      "incorrect gen in bucket_gens btree (got %u should be %u)\n"
                      "  %s",
                      alloc_gen(k, gens_offset), a->gen,
                      (printbuf_reset(&buf),
                       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i_bucket_gens *g =
                        bch2_trans_kmalloc(trans, sizeof(*g));

                ret = PTR_ERR_OR_ZERO(g);
                if (ret)
                        goto err;

                if (k.k->type == KEY_TYPE_bucket_gens) {
                        bkey_reassemble(&g->k_i, k);
                } else {
                        bkey_bucket_gens_init(&g->k_i);
                        g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
                }

                g->v.gens[gens_offset] = a->gen;

                ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
                if (ret)
                        goto err;
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
                                    struct bpos start,
                                    struct bpos *end,
                                    struct btree_iter *freespace_iter)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        ca = bch_dev_bkey_exists(c, start.inode);
        if (!ca->mi.freespace_initialized)
                return 0;

        bch2_btree_iter_set_pos(freespace_iter, start);

        k = bch2_btree_iter_peek_slot(freespace_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        *end = bkey_min(k.k->p, *end);

        if (k.k->type != KEY_TYPE_set &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, freespace_hole_missing,
                      "hole in alloc btree missing in freespace btree\n"
                      "  device %llu buckets %llu-%llu",
                      freespace_iter->pos.inode,
                      freespace_iter->pos.offset,
                      end->offset))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type  = KEY_TYPE_set;
                update->k.p     = freespace_iter->pos;
                bch2_key_resize(&update->k,
                                min_t(u64, U32_MAX, end->offset -
                                      freespace_iter->pos.offset));

                ret = bch2_trans_update(trans, freespace_iter, update, 0);
                if (ret)
                        goto err;
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
                                      struct bpos start,
                                      struct bpos *end,
                                      struct btree_iter *bucket_gens_iter)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        unsigned i, gens_offset, gens_end_offset;
        int ret;

        bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

        k = bch2_btree_iter_peek_slot(bucket_gens_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
                     alloc_gens_pos(*end,  &gens_end_offset)))
                gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

        if (k.k->type == KEY_TYPE_bucket_gens) {
                struct bkey_i_bucket_gens g;
                bool need_update = false;

                bkey_reassemble(&g.k_i, k);

                for (i = gens_offset; i < gens_end_offset; i++) {
                        if (fsck_err_on(g.v.gens[i], c,
                                        bucket_gens_hole_wrong,
                                        "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
                                        bucket_gens_pos_to_alloc(k.k->p, i).inode,
                                        bucket_gens_pos_to_alloc(k.k->p, i).offset,
                                        g.v.gens[i])) {
                                g.v.gens[i] = 0;
                                need_update = true;
                        }
                }

                if (need_update) {
                        struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

                        ret = PTR_ERR_OR_ZERO(u);
                        if (ret)
                                goto err;

                        memcpy(u, &g, sizeof(g));

                        ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
                        if (ret)
                                goto err;
                }
        }

        *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

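/*
 * Check a need_discard/freespace key against the corresponding alloc key.
 * In the freespace btree the top 8 bits of a key's offset carry the
 * bucket's genbits, so both the bucket's state and its genbits must match.
 */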
static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
                                              struct btree_iter *iter)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter;
        struct bkey_s_c alloc_k;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        u64 genbits;
        struct bpos pos;
        enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
                ? BCH_DATA_need_discard
                : BCH_DATA_free;
        struct printbuf buf = PRINTBUF;
        int ret;

        pos = iter->pos;
        pos.offset &= ~(~0ULL << 56);
        genbits = iter->pos.offset & (~0ULL << 56);

        alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
        ret = bkey_err(alloc_k);
        if (ret)
                return ret;

        if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
                        need_discard_freespace_key_to_invalid_dev_bucket,
                        "entry in %s btree for nonexistent dev:bucket %llu:%llu",
                        bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
                goto delete;

        a = bch2_alloc_to_v4(alloc_k, &a_convert);

        if (fsck_err_on(a->data_type != state ||
                        (state == BCH_DATA_free &&
                         genbits != alloc_freespace_genbits(*a)), c,
                        need_discard_freespace_key_bad,
                        "%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
                        bch2_btree_id_str(iter->btree_id),
                        iter->pos.inode,
                        iter->pos.offset,
                        a->data_type == state,
                        genbits >> 56, alloc_freespace_genbits(*a) >> 56))
                goto delete;
out:
fsck_err:
        set_btree_iter_dontneed(&alloc_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
delete:
        ret =   bch2_btree_delete_extent_at(trans, iter,
                        iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
                bch2_trans_commit(trans, NULL, NULL,
                        BCH_TRANS_COMMIT_no_enospc);
        goto out;
}

1328
1329 /*
1330  * We've already checked that generation numbers in the bucket_gens btree are
1331  * valid for buckets that exist; this just checks for keys for nonexistent
1332  * buckets.
1333  */
1334 static noinline_for_stack
1335 int bch2_check_bucket_gens_key(struct btree_trans *trans,
1336                                struct btree_iter *iter,
1337                                struct bkey_s_c k)
1338 {
1339         struct bch_fs *c = trans->c;
1340         struct bkey_i_bucket_gens g;
1341         struct bch_dev *ca;
1342         u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
1343         u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
1344         u64 b;
1345         bool need_update = false, dev_exists;
1346         struct printbuf buf = PRINTBUF;
1347         int ret = 0;
1348
1349         BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
1350         bkey_reassemble(&g.k_i, k);
1351
1352         /* if no bch_dev, skip out whether we repair or not */
1353         dev_exists = bch2_dev_exists2(c, k.k->p.inode);
1354         if (!dev_exists) {
1355                 if (fsck_err_on(!dev_exists, c,
1356                                 bucket_gens_to_invalid_dev,
1357                                 "bucket_gens key for invalid device:\n  %s",
1358                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1359                         ret = bch2_btree_delete_at(trans, iter, 0);
1360                 }
1361                 goto out;
1362         }
1363
1364         ca = bch_dev_bkey_exists(c, k.k->p.inode);
1365         if (fsck_err_on(end <= ca->mi.first_bucket ||
1366                         start >= ca->mi.nbuckets, c,
1367                         bucket_gens_to_invalid_buckets,
1368                         "bucket_gens key for invalid buckets:\n  %s",
1369                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1370                 ret = bch2_btree_delete_at(trans, iter, 0);
1371                 goto out;
1372         }
1373
1374         for (b = start; b < ca->mi.first_bucket; b++)
1375                 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1376                                 bucket_gens_nonzero_for_invalid_buckets,
1377                                 "bucket_gens key has nonzero gen for invalid bucket")) {
1378                         g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1379                         need_update = true;
1380                 }
1381
1382         for (b = ca->mi.nbuckets; b < end; b++)
1383                 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1384                                 bucket_gens_nonzero_for_invalid_buckets,
1385                                 "bucket_gens key has nonzero gen for invalid bucket")) {
1386                         g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1387                         need_update = true;
1388                 }
1389
1390         if (need_update) {
1391                 struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1392
1393                 ret = PTR_ERR_OR_ZERO(u);
1394                 if (ret)
1395                         goto out;
1396
1397                 memcpy(u, &g, sizeof(g));
1398                 ret = bch2_trans_update(trans, iter, u, 0);
1399         }
1400 out:
1401 fsck_err:
1402         printbuf_exit(&buf);
1403         return ret;
1404 }
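/*
 * Each bucket_gens key covers a run of consecutive buckets, packing one gen
 * per bucket into its value; that's why the loops above index into
 * g.v.gens[] with the low bits of the bucket number. A minimal sketch of
 * the lookup, using only the helpers seen above:
 *
 *	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
 *	u8 gen = g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK];  // start <= b < end
 */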
1405
1406 int bch2_check_alloc_info(struct bch_fs *c)
1407 {
1408         struct btree_trans *trans = bch2_trans_get(c);
1409         struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
1410         struct bkey hole;
1411         struct bkey_s_c k;
1412         int ret = 0;
1413
1414         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
1415                              BTREE_ITER_PREFETCH);
1416         bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
1417                              BTREE_ITER_PREFETCH);
1418         bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
1419                              BTREE_ITER_PREFETCH);
1420         bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
1421                              BTREE_ITER_PREFETCH);
1422
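        /*
         * Walk the alloc btree as a sequence of live keys and synthesized
         * holes: live keys must agree with the need_discard/freespace/
         * bucket_gens btrees, and holes must not appear in them:
         */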
1423         while (1) {
1424                 struct bpos next;
1425
1426                 bch2_trans_begin(trans);
1427
1428                 k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
1429                 ret = bkey_err(k);
1430                 if (ret)
1431                         goto bkey_err;
1432
1433                 if (!k.k)
1434                         break;
1435
1436                 if (k.k->type) {
1437                         next = bpos_nosnap_successor(k.k->p);
1438
1439                         ret = bch2_check_alloc_key(trans,
1440                                                    k, &iter,
1441                                                    &discard_iter,
1442                                                    &freespace_iter,
1443                                                    &bucket_gens_iter);
1444                         if (ret)
1445                                 goto bkey_err;
1446                 } else {
1447                         next = k.k->p;
1448
1449                         ret = bch2_check_alloc_hole_freespace(trans,
1450                                                     bkey_start_pos(k.k),
1451                                                     &next,
1452                                                     &freespace_iter) ?:
1453                                 bch2_check_alloc_hole_bucket_gens(trans,
1454                                                     bkey_start_pos(k.k),
1455                                                     &next,
1456                                                     &bucket_gens_iter);
1457                         if (ret)
1458                                 goto bkey_err;
1459                 }
1460
1461                 ret = bch2_trans_commit(trans, NULL, NULL,
1462                                         BCH_TRANS_COMMIT_no_enospc);
1463                 if (ret)
1464                         goto bkey_err;
1465
1466                 bch2_btree_iter_set_pos(&iter, next);
1467 bkey_err:
1468                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1469                         continue;
1470                 if (ret)
1471                         break;
1472         }
1473         bch2_trans_iter_exit(trans, &bucket_gens_iter);
1474         bch2_trans_iter_exit(trans, &freespace_iter);
1475         bch2_trans_iter_exit(trans, &discard_iter);
1476         bch2_trans_iter_exit(trans, &iter);
1477
1478         if (ret < 0)
1479                 goto err;
1480
1481         ret = for_each_btree_key(trans, iter,
1482                         BTREE_ID_need_discard, POS_MIN,
1483                         BTREE_ITER_PREFETCH, k,
1484                 bch2_check_discard_freespace_key(trans, &iter));
1485         if (ret)
1486                 goto err;
1487
1488         bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
1489                              BTREE_ITER_PREFETCH);
1490         while (1) {
1491                 bch2_trans_begin(trans);
1492                 k = bch2_btree_iter_peek(&iter);
1493                 if (!k.k)
1494                         break;
1495
1496                 ret = bkey_err(k) ?:
1497                         bch2_check_discard_freespace_key(trans, &iter);
1498                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1499                         ret = 0;
1500                         continue;
1501                 }
1502                 if (ret) {
1503                         struct printbuf buf = PRINTBUF;
1504                         bch2_bkey_val_to_text(&buf, c, k);
1505
1506                         bch_err(c, "while checking %s", buf.buf);
1507                         printbuf_exit(&buf);
1508                         break;
1509                 }
1510
1511                 bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
1512         }
1513         bch2_trans_iter_exit(trans, &iter);
1514         if (ret)
1515                 goto err;
1516
1517         ret = for_each_btree_key_commit(trans, iter,
1518                         BTREE_ID_bucket_gens, POS_MIN,
1519                         BTREE_ITER_PREFETCH, k,
1520                         NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1521                 bch2_check_bucket_gens_key(trans, &iter, k));
1522 err:
1523         bch2_trans_put(trans);
1524         bch_err_fn(c, ret);
1525         return ret;
1526 }
1527
1528 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
1529                                        struct btree_iter *alloc_iter)
1530 {
1531         struct bch_fs *c = trans->c;
1532         struct btree_iter lru_iter;
1533         struct bch_alloc_v4 a_convert;
1534         const struct bch_alloc_v4 *a;
1535         struct bkey_s_c alloc_k, lru_k;
1536         struct printbuf buf = PRINTBUF;
1537         int ret;
1538
1539         alloc_k = bch2_btree_iter_peek(alloc_iter);
1540         if (!alloc_k.k)
1541                 return 0;
1542
1543         ret = bkey_err(alloc_k);
1544         if (ret)
1545                 return ret;
1546
1547         a = bch2_alloc_to_v4(alloc_k, &a_convert);
1548
1549         if (a->data_type != BCH_DATA_cached)
1550                 return 0;
1551
1552         if (fsck_err_on(!a->io_time[READ], c,
1553                         alloc_key_cached_but_read_time_zero,
1554                         "cached bucket with read_time 0\n"
1555                         "  %s",
1556                 (printbuf_reset(&buf),
1557                  bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1558                 struct bkey_i_alloc_v4 *a_mut =
1559                         bch2_alloc_to_v4_mut(trans, alloc_k);
1560                 ret = PTR_ERR_OR_ZERO(a_mut);
1561                 if (ret)
1562                         goto err;
1563
1564                 a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
1565                 ret = bch2_trans_update(trans, alloc_iter,
1566                                         &a_mut->k_i, BTREE_TRIGGER_NORUN);
1567                 if (ret)
1568                         goto err;
1569
1570                 a = &a_mut->v;
1571         }
1572
1573         lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
1574                              lru_pos(alloc_k.k->p.inode,
1575                                      bucket_to_u64(alloc_k.k->p),
1576                                      a->io_time[READ]), 0);
1577         ret = bkey_err(lru_k);
1578         if (ret)
1579                 return ret;
1580
1581         if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
1582                         alloc_key_to_missing_lru_entry,
1583                         "missing lru entry\n"
1584                         "  %s",
1585                         (printbuf_reset(&buf),
1586                          bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1587                 ret = bch2_lru_set(trans,
1588                                    alloc_k.k->p.inode,
1589                                    bucket_to_u64(alloc_k.k->p),
1590                                    a->io_time[READ]);
1591                 if (ret)
1592                         goto err;
1593         }
1594 err:
1595 fsck_err:
1596         bch2_trans_iter_exit(trans, &lru_iter);
1597         printbuf_exit(&buf);
1598         return ret;
1599 }
1600
1601 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
1602 {
1603         int ret = bch2_trans_run(c,
1604                 for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
1605                                 POS_MIN, BTREE_ITER_PREFETCH, k,
1606                                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1607                         bch2_check_alloc_to_lru_ref(trans, &iter)));
1608         bch_err_fn(c, ret);
1609         return ret;
1610 }
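/*
 * LRU btree layout, as used above: lru_pos() packs the lru id (the device
 * index, for the read-time LRU) and the time into the inode field and the
 * dev:bucket into the offset, and the entry itself is just a KEY_TYPE_set
 * marker. A sketch of the round trip, assuming that encoding:
 *
 *	struct bpos pos = lru_pos(dev, bucket_to_u64(bucket), time);
 *	// bucket == u64_to_bucket(pos.offset)
 *	// time   == lru_pos_time(pos)
 */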
1611
1612 struct discard_buckets_state {
1613         u64             seen;
1614         u64             open;
1615         u64             need_journal_commit;
1616         u64             discarded;
1617         struct bch_dev  *ca;
1618         u64             need_journal_commit_this_dev;
1619 };
1620
1621 static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
1622 {
1623         if (s->ca == ca)
1624                 return;
1625
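        /*
         * If more buckets on the outgoing device are waiting on a journal
         * commit than it currently has free, kick off a journal flush so
         * discards can make progress:
         */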
1626         if (s->ca && s->need_journal_commit_this_dev >
1627             bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
1628                 bch2_journal_flush_async(&c->journal, NULL);
1629
1630         if (s->ca)
1631                 percpu_ref_put(&s->ca->ref);
1632         if (ca)
1633                 percpu_ref_get(&ca->ref);
1634         s->ca = ca;
1635         s->need_journal_commit_this_dev = 0;
1636 }
1637
1638 static int bch2_discard_one_bucket(struct btree_trans *trans,
1639                                    struct btree_iter *need_discard_iter,
1640                                    struct bpos *discard_pos_done,
1641                                    struct discard_buckets_state *s)
1642 {
1643         struct bch_fs *c = trans->c;
1644         struct bpos pos = need_discard_iter->pos;
1645         struct btree_iter iter = { NULL };
1646         struct bkey_s_c k;
1647         struct bch_dev *ca;
1648         struct bkey_i_alloc_v4 *a;
1649         struct printbuf buf = PRINTBUF;
1650         int ret = 0;
1651
1652         ca = bch_dev_bkey_exists(c, pos.inode);
1653
1654         if (!percpu_ref_tryget(&ca->io_ref)) {
1655                 bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
1656                 return 0;
1657         }
1658
1659         discard_buckets_next_dev(c, s, ca);
1660
1661         if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
1662                 s->open++;
1663                 goto out;
1664         }
1665
1666         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
1667                         c->journal.flushed_seq_ondisk,
1668                         pos.inode, pos.offset)) {
1669                 s->need_journal_commit++;
1670                 s->need_journal_commit_this_dev++;
1671                 goto out;
1672         }
1673
1674         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
1675                                need_discard_iter->pos,
1676                                BTREE_ITER_CACHED);
1677         ret = bkey_err(k);
1678         if (ret)
1679                 goto out;
1680
1681         a = bch2_alloc_to_v4_mut(trans, k);
1682         ret = PTR_ERR_OR_ZERO(a);
1683         if (ret)
1684                 goto out;
1685
1686         if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
1687                 a->v.gen++;
1688                 SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1689                 goto write;
1690         }
1691
1692         if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
1693                 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1694                         bch2_trans_inconsistent(trans,
1695                                 "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
1696                                 "%s",
1697                                 a->v.journal_seq,
1698                                 c->journal.flushed_seq_ondisk,
1699                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1700                         ret = -EIO;
1701                 }
1702                 goto out;
1703         }
1704
1705         if (a->v.data_type != BCH_DATA_need_discard) {
1706                 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1707                         bch2_trans_inconsistent(trans,
1708                                 "bucket incorrectly set in need_discard btree\n"
1709                                 "%s",
1710                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1711                         ret = -EIO;
1712                 }
1713
1714                 goto out;
1715         }
1716
1717         if (!bkey_eq(*discard_pos_done, iter.pos) &&
1718             ca->mi.discard && !c->opts.nochanges) {
1719                 /*
1720                  * This works without any other locks because this is the only
1721                  * thread that removes items from the need_discard btree
1722                  */
1723                 bch2_trans_unlock_long(trans);
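                /*
                 * ca->mi.bucket_size is in 512-byte sectors, the block
                 * layer's unit for blkdev_issue_discard():
                 */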
1724                 blkdev_issue_discard(ca->disk_sb.bdev,
1725                                      k.k->p.offset * ca->mi.bucket_size,
1726                                      ca->mi.bucket_size,
1727                                      GFP_KERNEL);
1728                 *discard_pos_done = iter.pos;
1729
1730                 ret = bch2_trans_relock_notrace(trans);
1731                 if (ret)
1732                         goto out;
1733         }
1734
1735         SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1736         a->v.data_type = alloc_data_type(a->v, a->v.data_type);
1737 write:
1738         ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
1739                 bch2_trans_commit(trans, NULL, NULL,
1740                                   BCH_WATERMARK_btree|
1741                                   BCH_TRANS_COMMIT_no_enospc);
1742         if (ret)
1743                 goto out;
1744
1745         count_event(c, bucket_discard);
1746         s->discarded++;
1747 out:
1748         s->seen++;
1749         bch2_trans_iter_exit(trans, &iter);
1750         percpu_ref_put(&ca->io_ref);
1751         printbuf_exit(&buf);
1752         return ret;
1753 }
1754
1755 static void bch2_do_discards_work(struct work_struct *work)
1756 {
1757         struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
1758         struct discard_buckets_state s = {};
1759         struct bpos discard_pos_done = POS_MAX;
1760         int ret;
1761
1762         /*
1763          * We're doing the commit in bch2_discard_one_bucket instead of using
1764          * for_each_btree_key_commit() so that we can increment counters after a
1765          * successful commit:
1766          */
1767         ret = bch2_trans_run(c,
1768                 for_each_btree_key(trans, iter,
1769                                    BTREE_ID_need_discard, POS_MIN, 0, k,
1770                         bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
1771
1772         discard_buckets_next_dev(c, &s, NULL);
1773
1774         trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
1775                               bch2_err_str(ret));
1776
1777         bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1778 }
1779
1780 void bch2_do_discards(struct bch_fs *c)
1781 {
1782         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
1783             !queue_work(c->write_ref_wq, &c->discard_work))
1784                 bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1785 }
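/*
 * bch2_do_discards() (and bch2_do_invalidates() below) use the same idiom,
 * sketched here with placeholder ref/work arguments: take a filesystem
 * write ref so the work can't run after going read-only, and drop it again
 * if the work item was already queued:
 *
 *	if (bch2_write_ref_tryget(c, ref) &&
 *	    !queue_work(c->write_ref_wq, work))
 *		bch2_write_ref_put(c, ref);
 *
 * The worker is then responsible for dropping the ref when it finishes.
 */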
1786
1787 static int invalidate_one_bucket(struct btree_trans *trans,
1788                                  struct btree_iter *lru_iter,
1789                                  struct bkey_s_c lru_k,
1790                                  s64 *nr_to_invalidate)
1791 {
1792         struct bch_fs *c = trans->c;
1793         struct btree_iter alloc_iter = { NULL };
1794         struct bkey_i_alloc_v4 *a = NULL;
1795         struct printbuf buf = PRINTBUF;
1796         struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
1797         unsigned cached_sectors;
1798         int ret = 0;
1799
1800         if (*nr_to_invalidate <= 0)
1801                 return 1;
1802
1803         if (!bch2_dev_bucket_exists(c, bucket)) {
1804                 prt_str(&buf, "lru entry points to invalid bucket");
1805                 goto err;
1806         }
1807
1808         if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
1809                 return 0;
1810
1811         a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
1812         ret = PTR_ERR_OR_ZERO(a);
1813         if (ret)
1814                 goto out;
1815
1816         /* We expect harmless races here due to the btree write buffer: */
1817         if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
1818                 goto out;
1819
1820         BUG_ON(a->v.data_type != BCH_DATA_cached);
1821
1822         if (!a->v.cached_sectors)
1823                 bch_err(c, "invalidating empty bucket, confused");
1824
1825         cached_sectors = a->v.cached_sectors;
1826
1827         SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1828         a->v.gen++;
1829         a->v.data_type          = 0;
1830         a->v.dirty_sectors      = 0;
1831         a->v.cached_sectors     = 0;
1832         a->v.io_time[READ]      = atomic64_read(&c->io_clock[READ].now);
1833         a->v.io_time[WRITE]     = atomic64_read(&c->io_clock[WRITE].now);
1834
1835         ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
1836                                 BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
1837                 bch2_trans_commit(trans, NULL, NULL,
1838                                   BCH_WATERMARK_btree|
1839                                   BCH_TRANS_COMMIT_no_enospc);
1840         if (ret)
1841                 goto out;
1842
1843         trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
1844         --*nr_to_invalidate;
1845 out:
1846         bch2_trans_iter_exit(trans, &alloc_iter);
1847         printbuf_exit(&buf);
1848         return ret;
1849 err:
1850         prt_str(&buf, "\n  lru key: ");
1851         bch2_bkey_val_to_text(&buf, c, lru_k);
1852
1853         prt_str(&buf, "\n  lru entry: ");
1854         bch2_lru_pos_to_text(&buf, lru_iter->pos);
1855
1856         prt_str(&buf, "\n  alloc key: ");
1857         if (!a)
1858                 bch2_bpos_to_text(&buf, bucket);
1859         else
1860                 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
1861
1862         bch_err(c, "%s", buf.buf);
1863         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
1864                 bch2_inconsistent_error(c);
1865                 ret = -EINVAL;
1866         }
1867
1868         goto out;
1869 }
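/*
 * Note that invalidating a cached bucket doesn't touch the data itself:
 * bumping a->v.gen is what invalidates it, since extent pointers into a
 * bucket are only considered live while their embedded gen matches the
 * bucket's current gen.
 */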
1870
1871 static void bch2_do_invalidates_work(struct work_struct *work)
1872 {
1873         struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
1874         struct btree_trans *trans = bch2_trans_get(c);
1875         int ret = 0;
1876
1877         ret = bch2_btree_write_buffer_tryflush(trans);
1878         if (ret)
1879                 goto err;
1880
1881         for_each_member_device(c, ca) {
1882                 s64 nr_to_invalidate =
1883                         should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
1884
1885                 ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
1886                                 lru_pos(ca->dev_idx, 0, 0),
1887                                 lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
1888                                 BTREE_ITER_INTENT, k,
1889                         invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
1890
1891                 if (ret < 0) {
1892                         percpu_ref_put(&ca->ref);
1893                         break;
1894                 }
1895         }
1896 err:
1897         bch2_trans_put(trans);
1898         bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
1899 }
1900
1901 void bch2_do_invalidates(struct bch_fs *c)
1902 {
1903         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
1904             !queue_work(c->write_ref_wq, &c->invalidate_work))
1905                 bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
1906 }
1907
1908 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
1909                             u64 bucket_start, u64 bucket_end)
1910 {
1911         struct btree_trans *trans = bch2_trans_get(c);
1912         struct btree_iter iter;
1913         struct bkey_s_c k;
1914         struct bkey hole;
1915         struct bpos end = POS(ca->dev_idx, bucket_end);
1916         struct bch_member *m;
1917         unsigned long last_updated = jiffies;
1918         int ret;
1919
1920         BUG_ON(bucket_start > bucket_end);
1921         BUG_ON(bucket_end > ca->mi.nbuckets);
1922
1923         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
1924                 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
1925                 BTREE_ITER_PREFETCH);
1926         /*
1927          * Scan the alloc btree for every bucket on @ca, and add buckets to the
1928          * freespace/need_discard/need_gc_gens btrees as needed:
1929          */
1930         while (1) {
1931                 if (time_after(jiffies, last_updated + HZ * 10)) {
1932                         bch_info(ca, "%s: currently at %llu/%llu",
1933                                  __func__, iter.pos.offset, ca->mi.nbuckets);
1934                         last_updated = jiffies;
1935                 }
1936
1937                 bch2_trans_begin(trans);
1938
1939                 if (bkey_ge(iter.pos, end)) {
1940                         ret = 0;
1941                         break;
1942                 }
1943
1944                 k = bch2_get_key_or_hole(&iter, end, &hole);
1945                 ret = bkey_err(k);
1946                 if (ret)
1947                         goto bkey_err;
1948
1949                 if (k.k->type) {
1950                         /*
1951                          * We process live keys in the alloc btree one at a
1952                          * time:
1953                          */
1954                         struct bch_alloc_v4 a_convert;
1955                         const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
1956
1957                         ret =   bch2_bucket_do_index(trans, k, a, true) ?:
1958                                 bch2_trans_commit(trans, NULL, NULL,
1959                                                   BCH_TRANS_COMMIT_no_enospc);
1960                         if (ret)
1961                                 goto bkey_err;
1962
1963                         bch2_btree_iter_advance(&iter);
1964                 } else {
1965                         struct bkey_i *freespace;
1966
1967                         freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
1968                         ret = PTR_ERR_OR_ZERO(freespace);
1969                         if (ret)
1970                                 goto bkey_err;
1971
1972                         bkey_init(&freespace->k);
1973                         freespace->k.type       = KEY_TYPE_set;
1974                         freespace->k.p          = k.k->p;
1975                         freespace->k.size       = k.k->size;
1976
1977                         ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
1978                                 bch2_trans_commit(trans, NULL, NULL,
1979                                                   BCH_TRANS_COMMIT_no_enospc);
1980                         if (ret)
1981                                 goto bkey_err;
1982
1983                         bch2_btree_iter_set_pos(&iter, k.k->p);
1984                 }
1985 bkey_err:
1986                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1987                         continue;
1988                 if (ret)
1989                         break;
1990         }
1991
1992         bch2_trans_iter_exit(trans, &iter);
1993         bch2_trans_put(trans);
1994
1995         if (ret < 0) {
1996                 bch_err_msg(ca, ret, "initializing free space");
1997                 return ret;
1998         }
1999
2000         mutex_lock(&c->sb_lock);
2001         m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
2002         SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
2003         mutex_unlock(&c->sb_lock);
2004
2005         return 0;
2006 }
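/*
 * Holes in the alloc btree are handled wholesale above: a run of missing
 * alloc keys becomes a single KEY_TYPE_set extent in the freespace btree
 * rather than one entry per bucket. For a hypothetical hole covering
 * buckets [b, b + n) on device dev, the inserted key would look like:
 *
 *	freespace->k.p    = POS(dev, b + n);	// extents end at k.p
 *	freespace->k.size = n;
 */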
2007
2008 int bch2_fs_freespace_init(struct bch_fs *c)
2009 {
2010         int ret = 0;
2011         bool doing_init = false;
2012
2013         /*
2014          * We can crash during the device add path, so we need to check this on
2015          * every mount:
2016          */
2017
2018         for_each_member_device(c, ca) {
2019                 if (ca->mi.freespace_initialized)
2020                         continue;
2021
2022                 if (!doing_init) {
2023                         bch_info(c, "initializing freespace");
2024                         doing_init = true;
2025                 }
2026
2027                 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
2028                 if (ret) {
2029                         percpu_ref_put(&ca->ref);
2030                         bch_err_fn(c, ret);
2031                         return ret;
2032                 }
2033         }
2034
2035         if (doing_init) {
2036                 mutex_lock(&c->sb_lock);
2037                 bch2_write_super(c);
2038                 mutex_unlock(&c->sb_lock);
2039                 bch_verbose(c, "done initializing freespace");
2040         }
2041
2042         return 0;
2043 }
2044
2045 /* Bucket IO clocks: */
2046
2047 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
2048                               size_t bucket_nr, int rw)
2049 {
2050         struct bch_fs *c = trans->c;
2051         struct btree_iter iter;
2052         struct bkey_i_alloc_v4 *a;
2053         u64 now;
2054         int ret = 0;
2055
2056         a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
2057         ret = PTR_ERR_OR_ZERO(a);
2058         if (ret)
2059                 return ret;
2060
2061         now = atomic64_read(&c->io_clock[rw].now);
2062         if (a->v.io_time[rw] == now)
2063                 goto out;
2064
2065         a->v.io_time[rw] = now;
2066
2067         ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
2068                 bch2_trans_commit(trans, NULL, NULL, 0);
2069 out:
2070         bch2_trans_iter_exit(trans, &iter);
2071         return ret;
2072 }
2073
2074 /* Startup/shutdown (ro/rw): */
2075
2076 void bch2_recalc_capacity(struct bch_fs *c)
2077 {
2078         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
2079         unsigned bucket_size_max = 0;
2080         unsigned long ra_pages = 0;
2081
2082         lockdep_assert_held(&c->state_lock);
2083
2084         for_each_online_member(c, ca) {
2085                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
2086
2087                 ra_pages += bdi->ra_pages;
2088         }
2089
2090         bch2_set_ra_pages(c, ra_pages);
2091
2092         for_each_rw_member(c, ca) {
2093                 u64 dev_reserve = 0;
2094
2095                 /*
2096                  * We need to reserve buckets (from the number
2097                  * of currently available buckets) against
2098                  * foreground writes so that mainly copygc can
2099                  * make forward progress.
2100                  *
2101                  * We need enough to refill the various reserves
2102                  * from scratch - copygc will use its entire
2103                  * reserve all at once, then run again once its
2104                  * reserve has been refilled (from the formerly
2105                  * available buckets).
2106                  *
2107                  * This reserve is just used when considering if
2108                  * allocations for foreground writes must wait -
2109                  * not for -ENOSPC calculations.
2110                  */
2111
2112                 dev_reserve += ca->nr_btree_reserve * 2;
2113                 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
2114
2115                 dev_reserve += 1;       /* btree write point */
2116                 dev_reserve += 1;       /* copygc write point */
2117                 dev_reserve += 1;       /* rebalance write point */
2118
2119                 dev_reserve *= ca->mi.bucket_size;
2120
2121                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
2122                                              ca->mi.first_bucket);
2123
2124                 reserved_sectors += dev_reserve * 2;
2125
2126                 bucket_size_max = max_t(unsigned, bucket_size_max,
2127                                         ca->mi.bucket_size);
2128         }
2129
2130         gc_reserve = c->opts.gc_reserve_bytes
2131                 ? c->opts.gc_reserve_bytes >> 9
2132                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
2133
2134         reserved_sectors = max(gc_reserve, reserved_sectors);
2135
2136         reserved_sectors = min(reserved_sectors, capacity);
2137
2138         c->capacity = capacity - reserved_sectors;
2139
2140         c->bucket_size_max = bucket_size_max;
2141
2142         /* Wake up in case someone was waiting for buckets: */
2143         closure_wake_up(&c->freelist_wait);
2144 }
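/*
 * Worked example of the reserve arithmetic in bch2_recalc_capacity(), with
 * made-up numbers: a device with nbuckets = 1 << 20, bucket_size = 1024
 * sectors and a hypothetical nr_btree_reserve of 512 gets
 *
 *	dev_reserve = (512 * 2 + (1 << 20 >> 6) + 3) buckets
 *	            = 17411 buckets = 17411 * 1024 sectors (~8.5 GiB)
 *
 * and contributes twice that to reserved_sectors, which is then clamped to
 * at least gc_reserve and at most the total capacity.
 */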
2145
2146 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
2147 {
2148         u64 ret = U64_MAX;
2149
2150         for_each_rw_member(c, ca)
2151                 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
2152         return ret;
2153 }
2154
2155 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
2156 {
2157         struct open_bucket *ob;
2158         bool ret = false;
2159
2160         for (ob = c->open_buckets;
2161              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
2162              ob++) {
2163                 spin_lock(&ob->lock);
2164                 if (ob->valid && !ob->on_partial_list &&
2165                     ob->dev == ca->dev_idx)
2166                         ret = true;
2167                 spin_unlock(&ob->lock);
2168         }
2169
2170         return ret;
2171 }
2172
2173 /* device goes ro: */
2174 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
2175 {
2176         unsigned i;
2177
2178         /* First, remove device from allocation groups: */
2179
2180         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2181                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
2182
2183         /*
2184          * Capacity is calculated based on the devices in allocation groups:
2185          */
2186         bch2_recalc_capacity(c);
2187
2188         bch2_open_buckets_stop(c, ca, false);
2189
2190         /*
2191          * Wake up threads that were blocked on allocation, so they can notice
2192          * the device can no longer be removed and the capacity has changed:
2193          */
2194         closure_wake_up(&c->freelist_wait);
2195
2196         /*
2197          * journal_res_get() can block waiting for free space in the journal -
2198          * it needs to notice there may not be devices to allocate from anymore:
2199          */
2200         wake_up(&c->journal.wait);
2201
2202         /* Now wait for any in flight writes: */
2203
2204         closure_wait_event(&c->open_buckets_wait,
2205                            !bch2_dev_has_open_write_point(c, ca));
2206 }
2207
2208 /* device goes rw: */
2209 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
2210 {
2211         unsigned i;
2212
2213         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2214                 if (ca->mi.data_allowed & (1 << i))
2215                         set_bit(ca->dev_idx, c->rw_devs[i].d);
2216 }
2217
2218 void bch2_fs_allocator_background_init(struct bch_fs *c)
2219 {
2220         spin_lock_init(&c->freelist_lock);
2221         INIT_WORK(&c->discard_work, bch2_do_discards_work);
2222         INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
2223 }