*
* Synchronous updates are specified by passing a closure (@flush_cl) to
* bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will will wait on the journal
- * write to complete (via closure_wait()).
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
*
* If the index update wasn't synchronous, the journal entry will be
* written out after 10 ms have elapsed, by default (the delay_ms field
return j->seq_ondisk + 1;
}
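For context on the synchronous path described in the comment above: the journalling code in effect parks the caller's closure on the journal buffer's wait list, and the caller blocks until the write completes. A minimal sketch of that pattern, assuming the generic closure primitives from <linux/closure.h> and a journal buffer with a closure_waitlist member named "wait" (illustrative, not lifted from this patch):

static void example_wait_for_journal_write(struct journal_buf *buf)
{
	struct closure cl;

	closure_init_stack(&cl);

	/* the journalling code parks the caller on the buffer's wait list */
	closure_wait(&buf->wait, &cl);

	/* ... journal write is submitted; completion does closure_wake_up(&buf->wait) ... */

	/* block here until the journal write has completed */
	closure_sync(&cl);
}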
-void bch2_journal_set_has_inum(struct journal *, u64, u64);
-
static inline int journal_state_count(union journal_res_state s, int idx)
{
switch (idx) {
return vstruct_idx(j->buf[res->idx].data, res->offset);
}
-static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type,
enum btree_id id, unsigned level,
- const void *data, unsigned u64s)
+ unsigned u64s)
{
entry->u64s = cpu_to_le16(u64s);
entry->btree_id = id;
entry->pad[0] = 0;
entry->pad[1] = 0;
entry->pad[2] = 0;
- memcpy_u64s_small(entry->_data, data, u64s);
-
return jset_u64s(u64s);
}
-static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
- unsigned type, enum btree_id id,
- unsigned level,
+static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+ enum btree_id id, unsigned level,
const void *data, unsigned u64s)
{
- unsigned actual = journal_entry_set(journal_res_entry(j, res),
- type, id, level, data, u64s);
+ unsigned ret = journal_entry_init(entry, type, id, level, u64s);
+
+ memcpy_u64s_small(entry->_data, data, u64s);
+ return ret;
+}
+
+static inline struct jset_entry *
+bch2_journal_add_entry(struct journal *j, struct journal_res *res,
+ unsigned type, enum btree_id id,
+ unsigned level, unsigned u64s)
+{
+ struct jset_entry *entry = journal_res_entry(j, res);
+ unsigned actual = journal_entry_init(entry, type, id, level, u64s);
EBUG_ON(!res->ref);
EBUG_ON(actual > res->u64s);
res->offset += actual;
res->u64s -= actual;
-}
-
-static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
- enum btree_id id, unsigned level,
- const struct bkey_i *k)
-{
- bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
- id, level, k, k->k.u64s);
+ return entry;
}
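The refactor above changes bch2_journal_add_entry() from copying a payload to returning the reserved entry: journal_entry_init() fills in only the header, and the caller copies or constructs the payload in place, which is why the bch2_journal_add_keys() wrapper can go away. A sketch of a hypothetical caller replacing that wrapper (j, res, id, level and the bkey_i pointer k are illustrative names, mirroring the removed helper):

	struct jset_entry *entry =
		bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
				       id, level, k->k.u64s);

	/* the payload is now copied by the caller, in place: */
	memcpy_u64s_small(entry->_data, k, k->k.u64s);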
static inline bool journal_entry_empty(struct jset *j)
return true;
}
-void __bch2_journal_buf_put(struct journal *);
-
-static inline void bch2_journal_buf_put(struct journal *j, unsigned idx)
+/*
+ * Drop a reference on the journal buffer at @idx and return the resulting
+ * reservation state, so the caller can check whether that buffer's count
+ * has hit zero.
+ */
+static inline union journal_res_state journal_state_buf_put(struct journal *j, unsigned idx)
{
union journal_res_state s;
.buf2_count = idx == 2,
.buf3_count = idx == 3,
}).v, &j->reservations.counter);
+ return s;
+}
+
+void bch2_journal_buf_put_final(struct journal *, u64, bool);
+
+static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+{
+ union journal_res_state s;
+
+ s = journal_state_buf_put(j, idx);
+ if (!journal_state_count(s, idx))
+ bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx);
+}
- if (!journal_state_count(s, idx) && idx == s.unwritten_idx)
- __bch2_journal_buf_put(j);
+static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+{
+ union journal_res_state s;
+
+ s = journal_state_buf_put(j, idx);
+ if (!journal_state_count(s, idx)) {
+ spin_lock(&j->lock);
+ bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx);
+ spin_unlock(&j->lock);
+ }
}
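Note the split into two put variants: bch2_journal_buf_put() takes j->lock itself around the final put, while __bch2_journal_buf_put() calls bch2_journal_buf_put_final() directly and so presumably expects the caller to already hold j->lock. A sketch of the assumed usage (idx and seq are illustrative):

	/* lock not held: the final put takes and drops j->lock internally */
	bch2_journal_buf_put(j, res->idx, res->seq);

	/* lock already held (assumption based on the locking difference above): */
	spin_lock(&j->lock);
	/* ... */
	__bch2_journal_buf_put(j, idx, seq);
	spin_unlock(&j->lock);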
/*
while (res->u64s)
bch2_journal_add_entry(j, res,
BCH_JSET_ENTRY_btree_keys,
- 0, 0, NULL, 0);
+ 0, 0, 0);
- bch2_journal_buf_put(j, res->idx);
+ bch2_journal_buf_put(j, res->idx, res->seq);
res->ref = 0;
}
int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
unsigned);
-#define JOURNAL_RES_GET_NONBLOCK (1 << 0)
-#define JOURNAL_RES_GET_CHECK (1 << 1)
-#define JOURNAL_RES_GET_RESERVED (1 << 2)
+/* The lowest BCH_WATERMARK_BITS of the flags word hold the watermark: */
+enum journal_res_flags {
+ __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS,
+ __JOURNAL_RES_GET_CHECK,
+};
+
+#define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK)
+#define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK)
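The watermark now occupies the low BCH_WATERMARK_BITS of the flags word and is compared against j->watermark in journal_res_get_fast() below, so callers OR a watermark together with the remaining flags. A sketch, assuming BCH_WATERMARK_reclaim is one of the enum bch_watermark values and bch2_journal_res_get() is the usual reservation wrapper around the fast/slow paths:

	struct journal_res res = {};
	int ret = bch2_journal_res_get(j, &res, u64s,
				       BCH_WATERMARK_reclaim|
				       JOURNAL_RES_GET_NONBLOCK);
	if (!ret) {
		/* ... add journal entries ... */
		bch2_journal_res_put(j, &res);
	}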
static inline int journal_res_get_fast(struct journal *j,
struct journal_res *res,
EBUG_ON(!journal_state_count(new, new.idx));
- if (!(flags & JOURNAL_RES_GET_RESERVED) &&
- !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags))
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark)
return 0;
new.cur_entry_offset += res->u64s;
return 0;
}
-/* journal_preres: */
-
-static inline bool journal_check_may_get_unreserved(struct journal *j)
-{
- union journal_preres_state s = READ_ONCE(j->prereserved);
- bool ret = s.reserved < s.remaining &&
- fifo_free(&j->pin) > j->pin.size / 4;
-
- lockdep_assert_held(&j->lock);
-
- if (ret != test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
- if (ret) {
- set_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
- journal_wake(j);
- } else {
- clear_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags);
- }
- }
- return ret;
-}
-
-static inline void bch2_journal_preres_put(struct journal *j,
- struct journal_preres *res)
-{
- union journal_preres_state s = { .reserved = res->u64s };
-
- if (!res->u64s)
- return;
-
- s.v = atomic64_sub_return(s.v, &j->prereserved.counter);
- res->u64s = 0;
-
- if (unlikely(s.waiting)) {
- clear_bit(ilog2((((union journal_preres_state) { .waiting = 1 }).v)),
- (unsigned long *) &j->prereserved.v);
- closure_wake_up(&j->preres_wait);
- }
-
- if (s.reserved <= s.remaining &&
- !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
- spin_lock(&j->lock);
- journal_check_may_get_unreserved(j);
- spin_unlock(&j->lock);
- }
-}
-
-int __bch2_journal_preres_get(struct journal *,
- struct journal_preres *, unsigned, unsigned);
-
-static inline int bch2_journal_preres_get_fast(struct journal *j,
- struct journal_preres *res,
- unsigned new_u64s,
- unsigned flags,
- bool set_waiting)
-{
- int d = new_u64s - res->u64s;
- union journal_preres_state old, new;
- u64 v = atomic64_read(&j->prereserved.counter);
- int ret;
-
- do {
- old.v = new.v = v;
- ret = 0;
-
- if ((flags & JOURNAL_RES_GET_RESERVED) ||
- new.reserved + d < new.remaining) {
- new.reserved += d;
- ret = 1;
- } else if (set_waiting && !new.waiting)
- new.waiting = true;
- else
- return 0;
- } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
- old.v, new.v)) != old.v);
-
- if (ret)
- res->u64s += d;
- return ret;
-}
-
-static inline int bch2_journal_preres_get(struct journal *j,
- struct journal_preres *res,
- unsigned new_u64s,
- unsigned flags)
-{
- if (new_u64s <= res->u64s)
- return 0;
-
- if (bch2_journal_preres_get_fast(j, res, new_u64s, flags, false))
- return 0;
-
- if (flags & JOURNAL_RES_GET_NONBLOCK)
- return -EAGAIN;
-
- return __bch2_journal_preres_get(j, res, new_u64s, flags);
-}
-
/* journal_entry_res: */
void bch2_journal_entry_res_resize(struct journal *,
int bch2_journal_flush(struct journal *);
bool bch2_journal_noflush_seq(struct journal *, u64);
int bch2_journal_meta(struct journal *);
-int bch2_journal_log_msg(struct journal *, const char *, ...);
void bch2_journal_halt(struct journal *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
int bch2_dev_journal_alloc(struct bch_dev *);
+int bch2_fs_journal_alloc(struct bch_fs *);
void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
void bch2_fs_journal_stop(struct journal *);
-int bch2_fs_journal_start(struct journal *, u64, struct list_head *);
+int bch2_fs_journal_start(struct journal *, u64);
void bch2_dev_journal_exit(struct bch_dev *);
int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);