+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_JOURNAL_H
#define _BCACHEFS_JOURNAL_H
return j->buf + j->reservations.idx;
}
-static inline struct journal_buf *journal_prev_buf(struct journal *j)
-{
- return j->buf + !j->reservations.idx;
-}
-
/* Sequence number of oldest dirty journal entry */
static inline u64 journal_last_seq(struct journal *j)
/*
 * Sequence number of the journal entry currently open for writing.
 *
 * Derived from the pin fifo: the last fifo index (back - 1) is the current
 * sequence number; on debug builds, cross-check it against j->seq.
 */
static inline u64 journal_cur_seq(struct journal *j)
{
	u64 seq = j->pin.back - 1;

	EBUG_ON(seq != atomic64_read(&j->seq));

	return seq;
}
u64 bch2_inode_journal_seq(struct journal *, u64);
+void bch2_journal_set_has_inum(struct journal *, u64, u64);
/*
 * Number of outstanding references held against journal buffer @idx.
 *
 * Each of the four journal buffers has its own count field packed into
 * journal_res_state; @idx selects which one to read. Any other index is
 * a bug.
 */
static inline int journal_state_count(union journal_res_state s, int idx)
{
	if (idx == 0)
		return s.buf0_count;
	if (idx == 1)
		return s.buf1_count;
	if (idx == 2)
		return s.buf2_count;
	if (idx == 3)
		return s.buf3_count;
	BUG();
}
/*
 * Take a reference on the currently open journal buffer: bump the
 * per-buffer count selected by s->idx.
 */
static inline void journal_state_inc(union journal_res_state *s)
{
	switch (s->idx) {
	case 0: s->buf0_count++; break;
	case 1: s->buf1_count++; break;
	case 2: s->buf2_count++; break;
	case 3: s->buf3_count++; break;
	}
}
static inline void bch2_journal_set_has_inode(struct journal *j,
return entry;
}
+static inline struct jset_entry *
+journal_res_entry(struct journal *j, struct journal_res *res)
+{
+ return vstruct_idx(j->buf[res->idx].data, res->offset);
+}
+
+static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+ enum btree_id id, unsigned level,
+ const void *data, unsigned u64s)
+{
+ entry->u64s = cpu_to_le16(u64s);
+ entry->btree_id = id;
+ entry->level = level;
+ entry->type = type;
+ entry->pad[0] = 0;
+ entry->pad[1] = 0;
+ entry->pad[2] = 0;
+ memcpy_u64s_small(entry->_data, data, u64s);
+
+ return jset_u64s(u64s);
+}
+
/*
 * Append an entry to an open journal reservation.
 *
 * Writes the entry at the reservation's current offset, then advances the
 * offset and shrinks the space remaining in the reservation accordingly.
 */
static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
					  unsigned type, enum btree_id id,
					  unsigned level,
					  const void *data, unsigned u64s)
{
	struct jset_entry *entry = journal_res_entry(j, res);
	unsigned actual = journal_entry_set(entry, type, id, level, data, u64s);

	EBUG_ON(!res->ref);
	EBUG_ON(actual > res->u64s);

	res->offset	+= actual;
	res->u64s	-= actual;
}
static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
- enum btree_id id, const struct bkey_i *k)
+ enum btree_id id, unsigned level,
+ const struct bkey_i *k)
{
bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys,
- id, 0, k, k->k.u64s);
+ id, level, k, k->k.u64s);
}
static inline bool journal_entry_empty(struct jset *j)
return true;
}
-void __bch2_journal_buf_put(struct journal *, bool);
+void __bch2_journal_buf_put(struct journal *);
-static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
- bool need_write_just_set)
+static inline void bch2_journal_buf_put(struct journal *j, unsigned idx)
{
union journal_res_state s;
s.v = atomic64_sub_return(((union journal_res_state) {
.buf0_count = idx == 0,
.buf1_count = idx == 1,
+ .buf2_count = idx == 2,
+ .buf3_count = idx == 3,
}).v, &j->reservations.counter);
- if (!journal_state_count(s, idx)) {
- EBUG_ON(s.idx == idx || !s.prev_buf_unwritten);
- __bch2_journal_buf_put(j, need_write_just_set);
- }
+
+ EBUG_ON(((s.idx - idx) & 3) >
+ ((s.idx - s.unwritten_idx) & 3));
+
+ if (!journal_state_count(s, idx) && idx == s.unwritten_idx)
+ __bch2_journal_buf_put(j);
}
/*
if (!res->ref)
return;
- lock_release(&j->res_map, 0, _RET_IP_);
+ lock_release(&j->res_map, _THIS_IP_);
while (res->u64s)
bch2_journal_add_entry(j, res,
BCH_JSET_ENTRY_btree_keys,
0, 0, NULL, 0);
- bch2_journal_buf_put(j, res->idx, false);
+ bch2_journal_buf_put(j, res->idx);
res->ref = 0;
}
!test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags))
return 0;
- if (flags & JOURNAL_RES_GET_CHECK)
- return 1;
-
new.cur_entry_offset += res->u64s;
journal_state_inc(&new);
+
+ /*
+ * If the refcount would overflow, we have to wait:
+ * XXX - tracepoint this:
+ */
+ if (!journal_state_count(new, new.idx))
+ return 0;
+
+ if (flags & JOURNAL_RES_GET_CHECK)
+ return 1;
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
return ret;
out:
if (!(flags & JOURNAL_RES_GET_CHECK)) {
- lock_acquire_shared(&j->res_map, 0, 0, NULL, _THIS_IP_);
+ lock_acquire_shared(&j->res_map, 0,
+ (flags & JOURNAL_RES_GET_NONBLOCK) != 0,
+ NULL, _THIS_IP_);
EBUG_ON(!res->ref);
}
return 0;
static inline bool journal_check_may_get_unreserved(struct journal *j)
{
union journal_preres_state s = READ_ONCE(j->prereserved);
- bool ret = s.reserved <= s.remaining &&
+ bool ret = s.reserved < s.remaining &&
fifo_free(&j->pin) > 8;
lockdep_assert_held(&j->lock);
s.v = atomic64_sub_return(s.v, &j->prereserved.counter);
res->u64s = 0;
- closure_wake_up(&j->preres_wait);
+
+ if (unlikely(s.waiting)) {
+ clear_bit(ilog2((((union journal_preres_state) { .waiting = 1 }).v)),
+ (unsigned long *) &j->prereserved.v);
+ closure_wake_up(&j->preres_wait);
+ }
if (s.reserved <= s.remaining &&
!test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
}
int __bch2_journal_preres_get(struct journal *,
- struct journal_preres *, unsigned);
+ struct journal_preres *, unsigned, unsigned);
static inline int bch2_journal_preres_get_fast(struct journal *j,
struct journal_preres *res,
- unsigned new_u64s)
+ unsigned new_u64s,
+ unsigned flags,
+ bool set_waiting)
{
int d = new_u64s - res->u64s;
union journal_preres_state old, new;
u64 v = atomic64_read(&j->prereserved.counter);
+ int ret;
do {
old.v = new.v = v;
-
- new.reserved += d;
-
- if (new.reserved > new.remaining)
+ ret = 0;
+
+ if ((flags & JOURNAL_RES_GET_RESERVED) ||
+ new.reserved + d < new.remaining) {
+ new.reserved += d;
+ ret = 1;
+ } else if (set_waiting && !new.waiting)
+ new.waiting = true;
+ else
return 0;
} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
old.v, new.v)) != old.v);
- res->u64s += d;
- return 1;
+ if (ret)
+ res->u64s += d;
+ return ret;
}
static inline int bch2_journal_preres_get(struct journal *j,
if (new_u64s <= res->u64s)
return 0;
- if (bch2_journal_preres_get_fast(j, res, new_u64s))
+ if (bch2_journal_preres_get_fast(j, res, new_u64s, flags, false))
return 0;
if (flags & JOURNAL_RES_GET_NONBLOCK)
return -EAGAIN;
- return __bch2_journal_preres_get(j, res, new_u64s);
+ return __bch2_journal_preres_get(j, res, new_u64s, flags);
}
/* journal_entry_res: */
struct journal_entry_res *,
unsigned);
-u64 bch2_journal_last_unwritten_seq(struct journal *);
-int bch2_journal_open_seq_async(struct journal *, u64, struct closure *);
-
-void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *);
-void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
+int bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
void bch2_journal_flush_async(struct journal *, struct closure *);
-void bch2_journal_meta_async(struct journal *, struct closure *);
int bch2_journal_flush_seq(struct journal *, u64);
int bch2_journal_flush(struct journal *);
struct bch_dev;
-static inline bool journal_flushes_device(struct bch_dev *ca)
-{
- return true;
-}
-
static inline void bch2_journal_set_replay_done(struct journal *j)
{
BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
void bch2_journal_unblock(struct journal *);
void bch2_journal_block(struct journal *);
-ssize_t bch2_journal_print_debug(struct journal *, char *);
-ssize_t bch2_journal_print_pins(struct journal *, char *);
+void __bch2_journal_debug_to_text(struct printbuf *, struct journal *);
+void bch2_journal_debug_to_text(struct printbuf *, struct journal *);
+void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);