bcachefs-tools-debian / libbcachefs / btree_update_leaf.c (bcachefs sources at f7670cba39)
1
2 #include "bcachefs.h"
3 #include "btree_update.h"
4 #include "btree_update_interior.h"
5 #include "btree_io.h"
6 #include "btree_iter.h"
7 #include "btree_locking.h"
8 #include "buckets.h"
9 #include "debug.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "journal.h"
13 #include "journal_reclaim.h"
14 #include "keylist.h"
15 #include "replicas.h"
16
17 #include <linux/sort.h>
18 #include <trace/events/bcachefs.h>
19
20 /* Inserting into a given leaf node (last stage of insert): */
21
22 /* Handle overwrites and do insert, for non-extent keys: */
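/*
 * Returns true if the bset was modified (the caller then journals the key);
 * returns false when asked to delete a key that wasn't present.
 */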
23 bool bch2_btree_bset_insert_key(struct btree_iter *iter,
24                                 struct btree *b,
25                                 struct btree_node_iter *node_iter,
26                                 struct bkey_i *insert)
27 {
28         const struct bkey_format *f = &b->format;
29         struct bkey_packed *k;
30         unsigned clobber_u64s;
31
32         EBUG_ON(btree_node_just_written(b));
33         EBUG_ON(bset_written(b, btree_bset_last(b)));
34         EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
35         EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
36                 bkey_cmp(insert->k.p, b->data->max_key) > 0);
37
38         k = bch2_btree_node_iter_peek_all(node_iter, b);
39         if (k && !bkey_cmp_packed(b, k, &insert->k)) {
40                 BUG_ON(bkey_whiteout(k));
41
42                 if (!bkey_written(b, k) &&
43                     bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
44                     !bkey_whiteout(&insert->k)) {
45                         k->type = insert->k.type;
46                         memcpy_u64s(bkeyp_val(f, k), &insert->v,
47                                     bkey_val_u64s(&insert->k));
48                         return true;
49                 }
50
51                 insert->k.needs_whiteout = k->needs_whiteout;
52
53                 btree_account_key_drop(b, k);
54
55                 if (k >= btree_bset_last(b)->start) {
56                         clobber_u64s = k->u64s;
57
58                         /*
59                          * If we're deleting, and the key we're deleting doesn't
60                          * need a whiteout (it wasn't overwriting a key that had
61                          * been written to disk) - just delete it:
62                          */
63                         if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
64                                 bch2_bset_delete(b, k, clobber_u64s);
65                                 bch2_btree_node_iter_fix(iter, b, node_iter,
66                                                          k, clobber_u64s, 0);
67                                 bch2_btree_iter_verify(iter, b);
68                                 return true;
69                         }
70
71                         goto overwrite;
72                 }
73
74                 k->type = KEY_TYPE_deleted;
75                 bch2_btree_node_iter_fix(iter, b, node_iter, k,
76                                          k->u64s, k->u64s);
77                 bch2_btree_iter_verify(iter, b);
78
79                 if (bkey_whiteout(&insert->k)) {
80                         reserve_whiteout(b, k);
81                         return true;
82                 } else {
83                         k->needs_whiteout = false;
84                 }
85         } else {
86                 /*
87                  * Deleting, but the key to delete wasn't found - nothing to do:
88                  */
89                 if (bkey_whiteout(&insert->k))
90                         return false;
91
92                 insert->k.needs_whiteout = false;
93         }
94
95         k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
96         clobber_u64s = 0;
97 overwrite:
98         bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
99         if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
100                 bch2_btree_node_iter_fix(iter, b, node_iter, k,
101                                          clobber_u64s, k->u64s);
102         bch2_btree_iter_verify(iter, b);
103         return true;
104 }
105
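/*
 * Journal pin flush callbacks for the two per-node write buffers: when journal
 * reclaim wants to free the pinned entry, write out the btree node if the
 * pinned write is still the node's current write at that sequence number.
 */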
106 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
107                                unsigned i, u64 seq)
108 {
109         struct bch_fs *c = container_of(j, struct bch_fs, journal);
110         struct btree_write *w = container_of(pin, struct btree_write, journal);
111         struct btree *b = container_of(w, struct btree, writes[i]);
112
113         btree_node_lock_type(c, b, SIX_LOCK_read);
114         bch2_btree_node_write_cond(c, b,
115                 (btree_current_write(b) == w && w->journal.seq == seq));
116         six_unlock_read(&b->lock);
117 }
118
119 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
120 {
121         return __btree_node_flush(j, pin, 0, seq);
122 }
123
124 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
125 {
126         return __btree_node_flush(j, pin, 1, seq);
127 }
128
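/*
 * Add @insert to the journal reservation held in trans->journal_res, mark the
 * inode as present in this journal entry, and report the sequence number back
 * through trans->journal_seq (if set). needs_whiteout is cleared around the
 * copy so that bit isn't journalled.
 */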
129 static inline void __btree_journal_key(struct btree_insert *trans,
130                                        enum btree_id btree_id,
131                                        struct bkey_i *insert)
132 {
133         struct journal *j = &trans->c->journal;
134         u64 seq = trans->journal_res.seq;
135         bool needs_whiteout = insert->k.needs_whiteout;
136
137         /* ick */
138         insert->k.needs_whiteout = false;
139         bch2_journal_add_keys(j, &trans->journal_res,
140                               btree_id, insert);
141         insert->k.needs_whiteout = needs_whiteout;
142
143         bch2_journal_set_has_inode(j, &trans->journal_res,
144                                    insert->k.p.inode);
145
146         if (trans->journal_seq)
147                 *trans->journal_seq = seq;
148 }
149
150 void bch2_btree_journal_key(struct btree_insert *trans,
151                            struct btree_iter *iter,
152                            struct bkey_i *insert)
153 {
154         struct bch_fs *c = trans->c;
155         struct journal *j = &c->journal;
156         struct btree *b = iter->l[0].b;
157         struct btree_write *w = btree_current_write(b);
158
159         EBUG_ON(iter->level || b->level);
160         EBUG_ON(trans->journal_res.ref !=
161                 !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
162
163         if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
164                 __btree_journal_key(trans, iter->btree_id, insert);
165                 btree_bset_last(b)->journal_seq =
166                         cpu_to_le64(trans->journal_res.seq);
167         }
168
169         if (unlikely(!journal_pin_active(&w->journal))) {
170                 u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
171                         ? trans->journal_res.seq
172                         : j->replay_journal_seq;
173
174                 bch2_journal_pin_add(j, seq, &w->journal,
175                                      btree_node_write_idx(b) == 0
176                                      ? btree_node_flush0
177                                      : btree_node_flush1);
178         }
179
180         if (unlikely(!btree_node_dirty(b)))
181                 set_btree_node_dirty(b);
182 }
183
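/*
 * Non-extent insert path: do the bset insert, and journal the key only if the
 * node was actually modified.
 */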
184 static enum btree_insert_ret
185 bch2_insert_fixup_key(struct btree_insert *trans,
186                      struct btree_insert_entry *insert)
187 {
188         struct btree_iter *iter = insert->iter;
189         struct btree_iter_level *l = &iter->l[0];
190
191         EBUG_ON(iter->level);
192         EBUG_ON(insert->k->k.u64s >
193                 bch_btree_keys_u64s_remaining(trans->c, l->b));
194
195         if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
196                                        insert->k))
197                 bch2_btree_journal_key(trans, iter, insert->k);
198
199         return BTREE_INSERT_OK;
200 }
201
202 /**
203  * btree_insert_key_leaf - insert one key into a leaf node
204  */
205 static enum btree_insert_ret
206 btree_insert_key_leaf(struct btree_insert *trans,
207                       struct btree_insert_entry *insert)
208 {
209         struct bch_fs *c = trans->c;
210         struct btree_iter *iter = insert->iter;
211         struct btree *b = iter->l[0].b;
212         enum btree_insert_ret ret;
213         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
214         int old_live_u64s = b->nr.live_u64s;
215         int live_u64s_added, u64s_added;
216
217         bch2_mark_update(trans, insert);
218
219         ret = !btree_node_is_extents(b)
220                 ? bch2_insert_fixup_key(trans, insert)
221                 : bch2_insert_fixup_extent(trans, insert);
222
223         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
224         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
225
226         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
227                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
228         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
229                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
230
231         if (u64s_added > live_u64s_added &&
232             bch2_maybe_compact_whiteouts(c, b))
233                 bch2_btree_iter_reinit_node(iter, b);
234
235         trace_btree_insert_key(c, b, insert->k);
236         return ret;
237 }
238
239 /* Deferred btree updates: */
240
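/*
 * A deferred update keeps the most recent version of a key in struct
 * deferred_update, protected by d->lock and pinned in the journal; the pin's
 * flush callback copies the key out, does a normal (non-deferred) btree
 * insert, then drops the pin unless a newer update raced in (the gen check).
 */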
241 static void deferred_update_flush(struct journal *j,
242                                         struct journal_entry_pin *pin,
243                                         u64 seq)
244 {
245         struct bch_fs *c = container_of(j, struct bch_fs, journal);
246         struct deferred_update *d =
247                 container_of(pin, struct deferred_update, journal);
248         u64 tmp[32];
249         struct bkey_i *k = (void *) tmp;
250         unsigned gen;
251         int ret;
252
253         if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
254                 k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);
255
256                 BUG_ON(!k); /* XXX */
257         }
258
259         spin_lock(&d->lock);
260         gen = d->gen;
261
262         if (journal_pin_active(&d->journal)) {
263                 BUG_ON(d->k.k.u64s > d->allocated_u64s);
264                 bkey_copy(k, &d->k);
265
266                 spin_unlock(&d->lock);
267
268                 ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
269                                         BTREE_INSERT_NOFAIL);
270                 bch2_fs_fatal_err_on(ret && !bch2_journal_error(j),
271                         c, "error flushing deferred btree update: %i", ret);
272
273                 spin_lock(&d->lock);
274         }
275
276         if (gen == d->gen)
277                 bch2_journal_pin_drop(j, &d->journal);
278         spin_unlock(&d->lock);
279
280         if (k != (void *) tmp)
281                 kfree(k);
282 }
283
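/*
 * A deferred insert only journals the key and stashes it in d->k, bumping
 * d->gen and moving the journal pin to the new reservation - the btree itself
 * is updated later, by deferred_update_flush().
 */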
284 static enum btree_insert_ret
285 btree_insert_key_deferred(struct btree_insert *trans,
286                           struct btree_insert_entry *insert)
287 {
288         struct bch_fs *c = trans->c;
289         struct journal *j = &c->journal;
290         struct deferred_update *d = insert->d;
291
292         BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
293         BUG_ON(insert->k->u64s > d->allocated_u64s);
294
295         __btree_journal_key(trans, d->btree_id, insert->k);
296
297         spin_lock(&d->lock);
298         d->gen++;
299         bkey_copy(&d->k, insert->k);
300         spin_unlock(&d->lock);
301
302         bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
303                                 deferred_update_flush);
304
305         return BTREE_INSERT_OK;
306 }
307
308 void bch2_deferred_update_free(struct bch_fs *c,
309                                struct deferred_update *d)
310 {
311         deferred_update_flush(&c->journal, &d->journal, 0);
312
313         BUG_ON(journal_pin_active(&d->journal));
314
315         bch2_journal_pin_flush(&c->journal, &d->journal);
316         kfree(d);
317 }
318
319 struct deferred_update *
320 bch2_deferred_update_alloc(struct bch_fs *c,
321                            enum btree_id btree_id,
322                            unsigned u64s)
323 {
324         struct deferred_update *d;
325
326         BUG_ON(u64s > U8_MAX);
327
328         d = kmalloc(offsetof(struct deferred_update, k) +
329                     u64s * sizeof(u64), GFP_NOFS);
330         BUG_ON(!d);
331
332         memset(d, 0, offsetof(struct deferred_update, k));
333
334         spin_lock_init(&d->lock);
335         d->allocated_u64s       = u64s;
336         d->btree_id             = btree_id;
337
338         return d;
339 }
340
341 /* struct btree_insert operations: */
342
343 /*
344  * We sort transaction entries so that if multiple iterators point to the same
345  * leaf node they'll be adjacent:
346  */
347 static bool same_leaf_as_prev(struct btree_insert *trans,
348                               struct btree_insert_entry *i)
349 {
350         return i != trans->entries &&
351                 !i->deferred &&
352                 i[0].iter->l[0].b == i[-1].iter->l[0].b;
353 }
354
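/*
 * __trans_for_each_entry() walks the sorted entries, skipping those that don't
 * match _filter: trans_for_each_iter() visits only non-deferred entries, and
 * trans_for_each_leaf() visits one non-deferred entry per distinct leaf node.
 */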
355 #define __trans_next_entry(_trans, _i, _filter)                         \
356 ({                                                                      \
357         while ((_i) < (_trans)->entries + (_trans->nr) && !(_filter))   \
358                 (_i)++;                                                 \
359                                                                         \
360         (_i) < (_trans)->entries + (_trans->nr);                        \
361 })
362
363 #define __trans_for_each_entry(_trans, _i, _filter)                     \
364         for ((_i) = (_trans)->entries;                                  \
365              __trans_next_entry(_trans, _i, _filter);                   \
366              (_i)++)
367
368 #define trans_for_each_entry(trans, i)                                  \
369         __trans_for_each_entry(trans, i, true)
370
371 #define trans_for_each_iter(trans, i)                                   \
372         __trans_for_each_entry(trans, i, !(i)->deferred)
373
374 #define trans_for_each_leaf(trans, i)                                   \
375         __trans_for_each_entry(trans, i, !(i)->deferred &&              \
376                                !same_leaf_as_prev(trans, i))
377
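/*
 * Take the write lock on a leaf we're about to insert into, doing any
 * post-write cleanup that was deferred and starting a new bset if the current
 * one has been written or has grown too big.
 */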
378 inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
379                                             struct btree_iter *iter)
380 {
381         bch2_btree_node_lock_write(b, iter);
382
383         if (btree_node_just_written(b) &&
384             bch2_btree_post_write_cleanup(c, b))
385                 bch2_btree_iter_reinit_node(iter, b);
386
387         /*
388          * If the last bset has been written, or if it's gotten too big - start
389          * a new bset to insert into:
390          */
391         if (want_new_bset(c, b))
392                 bch2_btree_init_next(c, b, iter);
393 }
394
395 static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
396 {
397         struct btree_insert_entry *i;
398
399         trans_for_each_leaf(trans, i)
400                 bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
401 }
402
403 static void multi_unlock_write(struct btree_insert *trans)
404 {
405         struct btree_insert_entry *i;
406
407         trans_for_each_leaf(trans, i)
408                 bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
409 }
410
411 static inline int btree_trans_cmp(struct btree_insert_entry l,
412                                   struct btree_insert_entry r)
413 {
414         return (l.deferred > r.deferred) - (l.deferred < r.deferred) ?:
415                 btree_iter_cmp(l.iter, r.iter);
416 }
417
418 /* Normal update interface: */
419
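/*
 * Check, with the node write locked, whether @insert can go into the leaf: a
 * fake node forces BTREE_INSERT_BTREE_NODE_FULL, the key's replicas must
 * already be marked, extents get their own per-key checks, and there must be
 * room for *u64s in the node.
 */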
420 static enum btree_insert_ret
421 btree_key_can_insert(struct btree_insert *trans,
422                      struct btree_insert_entry *insert,
423                      unsigned *u64s)
424 {
425         struct bch_fs *c = trans->c;
426         struct btree *b = insert->iter->l[0].b;
427         enum btree_insert_ret ret;
428
429         if (unlikely(btree_node_fake(b)))
430                 return BTREE_INSERT_BTREE_NODE_FULL;
431
432         if (!bch2_bkey_replicas_marked(c,
433                         bkey_i_to_s_c(insert->k),
434                         true))
435                 return BTREE_INSERT_NEED_MARK_REPLICAS;
436
437         ret = !btree_node_is_extents(b)
438                 ? BTREE_INSERT_OK
439                 : bch2_extent_can_insert(trans, insert, u64s);
440         if (ret)
441                 return ret;
442
443         if (*u64s > bch_btree_keys_u64s_remaining(c, b))
444                 return BTREE_INSERT_BTREE_NODE_FULL;
445
446         return BTREE_INSERT_OK;
447 }
448
449 static inline enum btree_insert_ret
450 do_btree_insert_one(struct btree_insert *trans,
451                     struct btree_insert_entry *insert)
452 {
453         return likely(!insert->deferred)
454                 ? btree_insert_key_leaf(trans, insert)
455                 : btree_insert_key_deferred(trans, insert);
456 }
457
458 /*
459  * Get journal reservation, take write locks, and attempt to do btree update(s):
460  */
461 static inline int do_btree_insert_at(struct btree_insert *trans,
462                                      struct btree_insert_entry **stopped_at)
463 {
464         struct bch_fs *c = trans->c;
465         struct btree_insert_entry *i;
466         struct btree_iter *linked;
467         unsigned u64s;
468         int ret;
469
470         trans_for_each_iter(trans, i)
471                 BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
472
473         /* reserve space for deferred updates */
474         __trans_for_each_entry(trans, i, i->deferred) {
475
476         }
477
478         memset(&trans->journal_res, 0, sizeof(trans->journal_res));
479
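        /*
         * Journal reservation: first try nonblocking with btree locks held; if
         * that would block, drop one iterator's locks, wait for journal space,
         * then relock - restarting with -EINTR if the relock fails.
         */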
480         if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
481                 u64s = 0;
482                 trans_for_each_entry(trans, i)
483                         u64s += jset_u64s(i->k->k.u64s);
484
485                 while ((ret = bch2_journal_res_get(&c->journal,
486                                         &trans->journal_res, u64s,
487                                         JOURNAL_RES_GET_NONBLOCK)) == -EAGAIN) {
488                         struct btree_iter *iter = NULL;
489
490                         trans_for_each_iter(trans, i)
491                                 iter = i->iter;
492
493                         if (iter)
494                                 bch2_btree_iter_unlock(iter);
495
496                         ret = bch2_journal_res_get(&c->journal,
497                                         &trans->journal_res, u64s,
498                                         JOURNAL_RES_GET_CHECK);
499                         if (ret)
500                                 return ret;
501
502                         if (iter && !bch2_btree_iter_relock(iter)) {
503                                 trans_restart(" (iter relock after journal res get blocked)");
504                                 return -EINTR;
505                         }
506                 }
507
508                 if (ret)
509                         return ret;
510         }
511
512         multi_lock_write(c, trans);
513
514         if (race_fault()) {
515                 ret = -EINTR;
516                 trans_restart(" (race)");
517                 goto out;
518         }
519
520         /*
521          * Check if the insert will fit in the leaf node with the write lock
522          * held, otherwise another thread could write the node changing the
523          * amount of space available:
524          */
525         u64s = 0;
526         trans_for_each_iter(trans, i) {
527                 /* Multiple inserts might go to same leaf: */
528                 if (!same_leaf_as_prev(trans, i))
529                         u64s = 0;
530
531                 u64s += i->k->k.u64s;
532                 ret = btree_key_can_insert(trans, i, &u64s);
533                 if (ret) {
534                         *stopped_at = i;
535                         goto out;
536                 }
537         }
538
539         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
540                 if (journal_seq_verify(c))
541                         trans_for_each_entry(trans, i)
542                                 i->k->k.version.lo = trans->journal_res.seq;
543                 else if (inject_invalid_keys(c))
544                         trans_for_each_entry(trans, i)
545                                 i->k->k.version = MAX_VERSION;
546         }
547
548         if (trans->flags & BTREE_INSERT_NOUNLOCK) {
549                 /*
550                  * linked iterators that weren't being updated may or may not
551                  * have been traversed/locked, depending on what the caller was
552                  * doing:
553                  */
554                 trans_for_each_iter(trans, i) {
555                         for_each_btree_iter(i->iter, linked)
556                                 if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
557                                         linked->flags |= BTREE_ITER_NOUNLOCK;
558                         break;
559                 }
560         }
561         trans->did_work = true;
562
563         trans_for_each_entry(trans, i) {
564                 switch (do_btree_insert_one(trans, i)) {
565                 case BTREE_INSERT_OK:
566                         break;
567                 case BTREE_INSERT_NEED_TRAVERSE:
568                         BUG_ON((trans->flags &
569                                 (BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
570                         ret = -EINTR;
571                         goto out;
572                 default:
573                         BUG();
574                 }
575         }
576 out:
577         multi_unlock_write(trans);
578         bch2_journal_res_put(&c->journal, &trans->journal_res);
579
580         return ret;
581 }
582
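/*
 * Per-entry sanity checks run before committing: leaf-level iterator, iterator
 * position matching the key's start, locks held, and (when debug_check_bkeys
 * is enabled) bkey validity.
 */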
583 static inline void btree_insert_entry_checks(struct bch_fs *c,
584                                              struct btree_insert_entry *i)
585 {
586         enum btree_id btree_id = !i->deferred
587                 ? i->iter->btree_id
588                 : i->d->btree_id;
589
590         if (!i->deferred) {
591                 BUG_ON(i->iter->level);
592                 BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
593
594                 bch2_btree_iter_verify_locks(i->iter);
595         }
596
597         BUG_ON(debug_check_bkeys(c) &&
598                !bkey_deleted(&i->k->k) &&
599                bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), btree_id));
600 }
601
602 /**
603  * __bch2_btree_insert_at - insert keys at given iterator positions
604  *
605  * This is the main entry point for btree updates.
606  *
607  * Return values:
608  * -EINTR: locking changed, this function should be called again. Only returned
609  *  if passed BTREE_INSERT_ATOMIC.
610  * -EROFS: filesystem read only
611  * -EIO: journal or btree node IO error
612  */
613 int __bch2_btree_insert_at(struct btree_insert *trans)
614 {
615         struct bch_fs *c = trans->c;
616         struct btree_insert_entry *i;
617         struct btree_iter *linked;
618         unsigned flags;
619         int ret;
620
621         BUG_ON(!trans->nr);
622
623         /* for the sake of sanity: */
624         BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
625
626         bubble_sort(trans->entries, trans->nr, btree_trans_cmp);
627
628         trans_for_each_entry(trans, i)
629                 btree_insert_entry_checks(c, i);
630
631         if (unlikely(!percpu_ref_tryget(&c->writes)))
632                 return -EROFS;
633 retry:
634         trans_for_each_iter(trans, i) {
635                 unsigned old_locks_want = i->iter->locks_want;
636                 unsigned old_uptodate = i->iter->uptodate;
637
638                 if (!bch2_btree_iter_upgrade(i->iter, 1, true)) {
639                         trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
640                                       old_locks_want, old_uptodate);
641                         ret = -EINTR;
642                         goto err;
643                 }
644
645                 if (i->iter->flags & BTREE_ITER_ERROR) {
646                         ret = -EIO;
647                         goto err;
648                 }
649         }
650
651         ret = do_btree_insert_at(trans, &i);
652         if (unlikely(ret))
653                 goto err;
654
655         trans_for_each_leaf(trans, i)
656                 bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);
657
658         trans_for_each_iter(trans, i)
659                 bch2_btree_iter_downgrade(i->iter);
660 out:
661         percpu_ref_put(&c->writes);
662
663         /* make sure we didn't drop or screw up locks: */
664         trans_for_each_iter(trans, i) {
665                 bch2_btree_iter_verify_locks(i->iter);
666                 break;
667         }
668
669         trans_for_each_iter(trans, i) {
670                 for_each_btree_iter(i->iter, linked)
671                         linked->flags &= ~BTREE_ITER_NOUNLOCK;
672                 break;
673         }
674
675         BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
676
677         return ret;
678 err:
679         flags = trans->flags;
680
681         /*
682          * BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree
683          * update; if we haven't done anything yet it doesn't apply
684          */
685         if (!trans->did_work)
686                 flags &= ~BTREE_INSERT_NOUNLOCK;
687
688         switch (ret) {
689         case BTREE_INSERT_BTREE_NODE_FULL:
690                 ret = bch2_btree_split_leaf(c, i->iter, flags);
691
692                 /*
693                  * if the split succeeded without dropping locks the insert will
694                  * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
695                  * caller peeked() and is overwriting won't have changed)
696                  */
697 #if 0
698                 /*
699                  * XXX:
700                  * split -> btree node merging (of parent node) might still drop
701                  * locks when we're not passing it BTREE_INSERT_NOUNLOCK
702                  */
703                 if (!ret && !trans->did_work)
704                         goto retry;
705 #endif
706
707                 /*
708                  * don't care if we got ENOSPC because we told split it
709                  * couldn't block:
710                  */
711                 if (!ret || (flags & BTREE_INSERT_NOUNLOCK)) {
712                         trans_restart(" (split)");
713                         ret = -EINTR;
714                 }
715                 break;
716         case BTREE_INSERT_NEED_GC_LOCK:
717                 ret = -EINTR;
718
719                 if (!down_read_trylock(&c->gc_lock)) {
720                         if (flags & BTREE_INSERT_NOUNLOCK)
721                                 goto out;
722
723                         bch2_btree_iter_unlock(trans->entries[0].iter);
724                         down_read(&c->gc_lock);
725                 }
726                 up_read(&c->gc_lock);
727                 break;
728         case BTREE_INSERT_ENOSPC:
729                 ret = -ENOSPC;
730                 break;
731         case BTREE_INSERT_NEED_MARK_REPLICAS:
732                 if (flags & BTREE_INSERT_NOUNLOCK) {
733                         ret = -EINTR;
734                         goto out;
735                 }
736
737                 bch2_btree_iter_unlock(trans->entries[0].iter);
738                 ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k))
739                         ?: -EINTR;
740                 break;
741         default:
742                 BUG_ON(ret >= 0);
743                 break;
744         }
745
746         if (ret == -EINTR) {
747                 if (flags & BTREE_INSERT_NOUNLOCK) {
748                         trans_restart(" (can't unlock)");
749                         goto out;
750                 }
751
752                 trans_for_each_iter(trans, i) {
753                         int ret2 = bch2_btree_iter_traverse(i->iter);
754                         if (ret2) {
755                                 ret = ret2;
756                                 trans_restart(" (traverse)");
757                                 goto out;
758                         }
759
760                         BUG_ON(i->iter->uptodate > BTREE_ITER_NEED_PEEK);
761                 }
762
763                 /*
764                  * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
765                  * dropped locks:
766                  */
767                 if (!(flags & BTREE_INSERT_ATOMIC))
768                         goto retry;
769
770                 trans_restart(" (atomic)");
771         }
772
773         goto out;
774 }
775
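/*
 * bch2_trans_commit() adapts the newer transaction interface onto
 * __bch2_btree_insert_at(): it packs the transaction's queued updates into a
 * struct btree_insert, resets trans->nr_updates, and commits them in one call.
 */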
776 int bch2_trans_commit(struct btree_trans *trans,
777                       struct disk_reservation *disk_res,
778                       u64 *journal_seq,
779                       unsigned flags)
780 {
781         struct btree_insert insert = {
782                 .c              = trans->c,
783                 .disk_res       = disk_res,
784                 .journal_seq    = journal_seq,
785                 .flags          = flags,
786                 .nr             = trans->nr_updates,
787                 .entries        = trans->updates,
788         };
789
790         if (!trans->nr_updates)
791                 return 0;
792
793         trans->nr_updates = 0;
794
795         return __bch2_btree_insert_at(&insert);
796 }
797
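/*
 * The helpers below are thin wrappers around bch2_btree_insert_at(); deleting
 * a key is just inserting a key with no value (a whiteout) at the iterator's
 * position. An illustrative (non-authoritative) sketch of a direct caller,
 * mirroring bch2_btree_insert() further down:
 *
 *	bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
 *			     BTREE_ITER_INTENT);
 *	ret = bch2_btree_insert_at(c, NULL, NULL, BTREE_INSERT_NOFAIL,
 *				   BTREE_INSERT_ENTRY(&iter, k));
 *	bch2_btree_iter_unlock(&iter);
 */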
798 int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
799 {
800         struct bkey_i k;
801
802         bkey_init(&k.k);
803         k.k.p = iter->pos;
804
805         return bch2_btree_insert_at(iter->c, NULL, NULL,
806                                     BTREE_INSERT_NOFAIL|
807                                     BTREE_INSERT_USE_RESERVE|flags,
808                                     BTREE_INSERT_ENTRY(iter, &k));
809 }
810
811 int bch2_btree_insert_list_at(struct btree_iter *iter,
812                              struct keylist *keys,
813                              struct disk_reservation *disk_res,
814                              u64 *journal_seq, unsigned flags)
815 {
816         BUG_ON(flags & BTREE_INSERT_ATOMIC);
817         BUG_ON(bch2_keylist_empty(keys));
818         bch2_verify_keylist_sorted(keys);
819
820         while (!bch2_keylist_empty(keys)) {
821                 int ret = bch2_btree_insert_at(iter->c, disk_res,
822                                 journal_seq, flags,
823                                 BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
824                 if (ret)
825                         return ret;
826
827                 bch2_keylist_pop_front(keys);
828         }
829
830         return 0;
831 }
832
833 /**
834  * bch2_btree_insert - insert a single key into the given btree
835  * @c:                  pointer to struct bch_fs
836  * @id:                 btree to insert into
837  * @k:                  key to insert
838  * @disk_res:           disk reservation, or NULL
839  */
840 int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
841                      struct bkey_i *k,
842                      struct disk_reservation *disk_res,
843                      u64 *journal_seq, int flags)
844 {
845         struct btree_iter iter;
846         int ret;
847
848         bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
849                              BTREE_ITER_INTENT);
850         ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
851                                    BTREE_INSERT_ENTRY(&iter, k));
852         bch2_btree_iter_unlock(&iter);
853
854         return ret;
855 }
856
857 /*
858  * bch2_btree_delete_range - delete everything within a given range
859  *
860  * Range is a half-open interval - [start, end)
861  */
862 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
863                             struct bpos start, struct bpos end,
864                             u64 *journal_seq)
865 {
866         struct btree_iter iter;
867         struct bkey_s_c k;
868         int ret = 0;
869
870         bch2_btree_iter_init(&iter, c, id, start,
871                              BTREE_ITER_INTENT);
872
873         while ((k = bch2_btree_iter_peek(&iter)).k &&
874                !(ret = btree_iter_err(k)) &&
875                bkey_cmp(iter.pos, end) < 0) {
876                 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
877                 /* really shouldn't be using a bare, unpadded bkey_i */
878                 struct bkey_i delete;
879
880                 bkey_init(&delete.k);
881
882                 /*
883                  * For extents, iter.pos won't necessarily be the same as
884                  * bkey_start_pos(k.k) (for non extents they always will be the
885                  * same). It's important that we delete starting from iter.pos
886                  * because the range we want to delete could start in the middle
887                  * of k.
888                  *
889                  * (bch2_btree_iter_peek() does guarantee that iter.pos >=
890                  * bkey_start_pos(k.k)).
891                  */
892                 delete.k.p = iter.pos;
893
894                 if (iter.flags & BTREE_ITER_IS_EXTENTS) {
895                         /* create the biggest key we can */
896                         bch2_key_resize(&delete.k, max_sectors);
897                         bch2_cut_back(end, &delete.k);
898                 }
899
900                 ret = bch2_btree_insert_at(c, NULL, journal_seq,
901                                            BTREE_INSERT_NOFAIL,
902                                            BTREE_INSERT_ENTRY(&iter, &delete));
903                 if (ret)
904                         break;
905
906                 bch2_btree_iter_cond_resched(&iter);
907         }
908
909         bch2_btree_iter_unlock(&iter);
910         return ret;
911 }