Update bcachefs sources to 0d63ed13ea3d closures: Fix race in closure_sync()

diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index 80a612c0577f25d4228aabc3567d4e7aebe3de61..5b5d69f2316b216746c0c08db2346c2c8c95ff16 100644 (file)
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -63,6 +63,7 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
        unsigned i;
+
        for (i = 0; i < ARRAY_SIZE(p->list); i++)
                INIT_LIST_HEAD(&p->list[i]);
        INIT_LIST_HEAD(&p->flushed);
@@ -131,13 +132,21 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
        return stuck;
 }
 
-/* journal entry close/open: */
-
-void __bch2_journal_buf_put(struct journal *j)
+/*
+ * Final processing when the last reference of a journal buffer has been
+ * dropped. Drop the pin list reference acquired at journal entry open and write
+ * the buffer, if requested.
+ */
+void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
 
-       closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
+       lockdep_assert_held(&j->lock);
+
+       if (__bch2_journal_pin_put(j, seq))
+               bch2_journal_reclaim_fast(j);
+       if (write)
+               closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
 }
 
 /*
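[Editor's note] The new bch2_journal_buf_put_final() makes the thread that drops the last buffer reference also the one that releases the pin taken at journal entry open and, if requested, starts the write (the matching removal from __journal_entry_close() is in the next hunk). A minimal, self-contained sketch of that "last ref runs the final path" pattern; every name below is made up for illustration and none of it is bcachefs code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct jbuf {
	atomic_uint		refs;
	unsigned long long	seq;
};

/* Placeholder stubs standing in for the pin drop and the journal write. */
static void pin_put(unsigned long long seq)	{ printf("pin put %llu\n", seq); }
static void start_write(struct jbuf *b)		{ printf("write %llu\n", b->seq); }

/* Runs exactly once, by whoever drops the last reference. */
static void jbuf_put_final(struct jbuf *b, bool write)
{
	pin_put(b->seq);		/* drop the pin taken at entry open */
	if (write)
		start_write(b);		/* kick off the write, if requested */
}

static void jbuf_put(struct jbuf *b, bool write)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub_explicit(&b->refs, 1, memory_order_acq_rel) == 1)
		jbuf_put_final(b, write);
}

int main(void)
{
	struct jbuf b = { .seq = 42 };

	atomic_init(&b.refs, 2);
	jbuf_put(&b, true);	/* not the last ref: nothing happens yet */
	jbuf_put(&b, true);	/* last ref: pin dropped, write started */
	return 0;
}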
@@ -203,13 +212,11 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
 
-       __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
-
        cancel_delayed_work(&j->write_work);
 
        bch2_journal_space_available(j);
 
-       bch2_journal_buf_put(j, old.idx);
+       __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
 }
 
 void bch2_journal_halt(struct journal *j)
@@ -514,8 +521,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
        int ret;
 
        closure_wait_event(&j->async_wait,
-                  (ret = __journal_res_get(j, res, flags)) !=
-                  -BCH_ERR_journal_res_get_blocked||
+                  (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
 }
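[Editor's note] For readers unfamiliar with the closure_wait_event() idiom used above: the whole predicate, including the assignment to ret, is re-evaluated on every wakeup, which is why the loop only exits once __journal_res_get() stops returning -BCH_ERR_journal_res_get_blocked (or NONBLOCK was requested). A rough userspace analogue of that wait-until-predicate shape, purely illustrative and not the closure library:

#include <pthread.h>

struct waitq {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
};

/*
 * Sleep until `cond_expr` evaluates true; the expression (and any
 * assignments buried inside it) is re-run under the lock after every wakeup.
 */
#define wait_event(wq, cond_expr)				\
do {								\
	pthread_mutex_lock(&(wq)->lock);			\
	while (!(cond_expr))					\
		pthread_cond_wait(&(wq)->cond, &(wq)->lock);	\
	pthread_mutex_unlock(&(wq)->lock);			\
} while (0)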
@@ -588,8 +594,13 @@ out:
 
 /**
  * bch2_journal_flush_seq_async - wait for a journal entry to be written
+ * @j:         journal object
+ * @seq:       seq to flush
+ * @parent:    closure object to wait with
+ * Returns:    1 if @seq has already been flushed, 0 if @seq is being flushed,
+ *             -EIO if @seq will never be flushed
  *
- * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
+ * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
  * necessary
  */
 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
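[Editor's note] Given the return values the new kernel-doc spells out, a caller might use the function along these lines. This is a hedged sketch, not code from the patch; the closure handling (closure_init_stack()/closure_sync() from the closure library this series touches) is an assumption about typical usage:

/* Hypothetical helper: flush @seq and wait if a write had to be started. */
static int flush_seq_and_wait(struct journal *j, u64 seq)
{
	struct closure cl;
	int ret;

	closure_init_stack(&cl);

	ret = bch2_journal_flush_seq_async(j, seq, &cl);
	/* 0 means a write is in flight and &cl was added as a waiter;
	 * otherwise no waiter was added and closure_sync() is a no-op. */
	closure_sync(&cl);

	/* 1: already flushed; 0: flushed now; <0 (e.g. -EIO): never will be */
	return ret < 0 ? ret : 0;
}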
@@ -829,12 +840,12 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                break;
 
                        ret = bch2_trans_run(c,
-                               bch2_trans_mark_metadata_bucket(&trans, ca,
+                               bch2_trans_mark_metadata_bucket(trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                                ca->mi.bucket_size));
                        if (ret) {
                                bch2_open_bucket_put(c, ob[nr_got]);
-                               bch_err(c, "error marking new journal buckets: %s", bch2_err_str(ret));
+                               bch_err_msg(c, ret, "marking new journal buckets");
                                break;
                        }
 
@@ -910,7 +921,7 @@ err_unblock:
        if (ret && !new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_trans_run(c,
-                               bch2_trans_mark_metadata_bucket(&trans, ca,
+                               bch2_trans_mark_metadata_bucket(trans, ca,
                                                bu[i], BCH_DATA_free, 0));
 err_free:
        if (!new_fs)
@@ -944,7 +955,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                goto unlock;
 
        while (ja->nr < nr) {
-               struct disk_reservation disk_res = { 0, 0 };
+               struct disk_reservation disk_res = { 0, 0, 0 };
 
                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
@@ -1008,6 +1019,25 @@ err:
        return ret;
 }
 
+int bch2_fs_journal_alloc(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       unsigned i;
+
+       for_each_online_member(ca, c, i) {
+               if (ca->journal.nr)
+                       continue;
+
+               int ret = bch2_dev_journal_alloc(ca);
+               if (ret) {
+                       percpu_ref_put(&ca->io_ref);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
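[Editor's note] The percpu_ref_put() before the early return above is the part that is easy to get wrong: for_each_online_member() hands out each device with its io_ref held, and returning from inside the loop skips the put the iterator would otherwise do. A toy, self-contained illustration of that reference discipline; none of these names are bcachefs code:

#include <stdio.h>

struct dev { int refs; int has_journal; };
struct fs  { struct dev devs[3]; };

/* Iterator helper: returns the next device with a "reference" taken. */
static struct dev *get_member(struct fs *fs, unsigned i)
{
	if (i >= 3)
		return NULL;
	fs->devs[i].refs++;
	return &fs->devs[i];
}

static void put_member(struct dev *d)		{ d->refs--; }
static int  dev_journal_alloc(struct dev *d)	{ return d == NULL; }

/* Mirrors the shape of bch2_fs_journal_alloc(): skip devices that already
 * have a journal, and drop the iterator's reference before an early return. */
static int fs_journal_alloc(struct fs *fs)
{
	struct dev *d;
	unsigned i;

	for (i = 0; (d = get_member(fs, i)); put_member(d), i++) {
		if (d->has_journal)
			continue;	/* `continue` still runs put_member() */

		int ret = dev_journal_alloc(d);
		if (ret) {
			put_member(d);	/* `return` would not, so put by hand */
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct fs fs = { .devs = { { .has_journal = 1 } } };

	printf("ret %d\n", fs_journal_alloc(&fs));
	return 0;
}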
+
 /* startup/shutdown: */
 
 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
@@ -1159,9 +1189,9 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 {
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
-               bch2_sb_get_journal(sb);
+               bch2_sb_field_get(sb, journal);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
-               bch2_sb_get_journal_v2(sb);
+               bch2_sb_field_get(sb, journal_v2);
        unsigned i, nr_bvecs;
 
        ja->nr = 0;
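[Editor's note] The bch2_sb_field_get(sb, journal) / bch2_sb_field_get(sb, journal_v2) calls above suggest the per-field helpers (bch2_sb_get_journal() and friends) have been folded into one generic accessor. A hypothetical sketch of how such a macro can be built from a single lookup plus token pasting; the names below are made up and are not the real bcachefs definitions:

#include <stddef.h>

enum sb_field_type { SB_FIELD_journal, SB_FIELD_journal_v2, SB_FIELD_NR };

struct sb_field            { enum sb_field_type type; };
struct sb_field_journal    { struct sb_field field;   };
struct sb_field_journal_v2 { struct sb_field field;   };

struct sb { struct sb_field *fields[SB_FIELD_NR]; };

/* One lookup by field id; real code would walk a variable-length list. */
static struct sb_field *sb_field_lookup(struct sb *sb, enum sb_field_type t)
{
	return sb->fields[t];
}

/* One macro instead of one sb_get_<name>() helper per field type:
 * token pasting selects both the enum value and the struct to cast to. */
#define sb_field_get(sb, name) \
	((struct sb_field_##name *) sb_field_lookup(sb, SB_FIELD_##name))

With that in place, sb_field_get(sb, journal) expands to a correctly typed pointer without a dedicated helper per field, which is the shape the calls in this hunk rely on.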