-#ifndef _BCACHE_ALLOC_H
-#define _BCACHE_ALLOC_H
+#ifndef _BCACHEFS_ALLOC_H
+#define _BCACHEFS_ALLOC_H
+#include "bcachefs.h"
#include "alloc_types.h"
struct bkey;
-struct bucket;
struct bch_dev;
struct bch_fs;
-struct dev_group;
+struct bch_devs_list;
-static inline size_t prios_per_bucket(const struct bch_dev *ca)
+struct dev_alloc_list {
+ unsigned nr;
+ u8 devs[BCH_SB_MEMBERS_MAX];
+};
+
+struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *,
+ struct write_point *,
+ struct bch_devs_mask *);
+void bch2_wp_rescale(struct bch_fs *, struct bch_dev *,
+ struct write_point *);
+
+int bch2_alloc_read(struct bch_fs *, struct list_head *);
+int bch2_alloc_replay_key(struct bch_fs *, struct bpos);
+
+enum bucket_alloc_ret {
+ ALLOC_SUCCESS = 0,
+ OPEN_BUCKETS_EMPTY = -1,
+ FREELIST_EMPTY = -2, /* Allocator thread not keeping up */
+ NO_DEVICES = -3, /* -EROFS */
+};
+
+int bch2_bucket_alloc(struct bch_fs *, struct bch_dev *, enum alloc_reserve, bool,
+ struct closure *);
+
+void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
+
+static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
- return (bucket_bytes(ca) - sizeof(struct prio_set)) /
- sizeof(struct bucket_disk);
+ if (atomic_dec_and_test(&ob->pin))
+ __bch2_open_bucket_put(c, ob);
}
-static inline size_t prio_buckets(const struct bch_dev *ca)
+static inline void bch2_open_bucket_put_refs(struct bch_fs *c, u8 *nr, u8 *refs)
{
- return DIV_ROUND_UP((size_t) (ca)->mi.nbuckets, prios_per_bucket(ca));
-}
+ unsigned i;
-void bch2_dev_group_remove(struct dev_group *, struct bch_dev *);
-void bch2_dev_group_add(struct dev_group *, struct bch_dev *);
+ for (i = 0; i < *nr; i++)
+ bch2_open_bucket_put(c, c->open_buckets + refs[i]);
-int bch2_prio_read(struct bch_dev *);
-int bch2_prio_write(struct bch_dev *);
+ *nr = 0;
+}
-size_t bch2_bucket_alloc(struct bch_dev *, enum alloc_reserve);
+static inline void bch2_open_bucket_get(struct bch_fs *c,
+ struct write_point *wp,
+ u8 *nr, u8 *refs)
+{
+ unsigned i;
-void bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
+ for (i = 0; i < wp->nr_ptrs_can_use; i++) {
+ struct open_bucket *ob = wp->ptrs[i];
-struct open_bucket *bch2_alloc_sectors_start(struct bch_fs *,
- struct write_point *,
- unsigned, unsigned,
- enum alloc_reserve,
- struct closure *);
+ atomic_inc(&ob->pin);
+ refs[(*nr)++] = ob - c->open_buckets;
+ }
+}
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct bkey_i_extent *,
- unsigned, struct open_bucket *, unsigned);
-void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *,
- struct open_bucket *);
+struct write_point *bch2_alloc_sectors_start(struct bch_fs *,
+ struct bch_devs_mask *,
+ struct write_point_specifier,
+ struct bch_devs_list *,
+ unsigned, unsigned,
+ enum alloc_reserve,
+ unsigned,
+ struct closure *);
-struct open_bucket *bch2_alloc_sectors(struct bch_fs *, struct write_point *,
- struct bkey_i_extent *, unsigned, unsigned,
- enum alloc_reserve, struct closure *);
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
+ struct bkey_i_extent *, unsigned);
+void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
static inline void bch2_wake_allocator(struct bch_dev *ca)
{
struct task_struct *p;
rcu_read_lock();
- if ((p = ACCESS_ONCE(ca->alloc_thread)))
+ if ((p = READ_ONCE(ca->alloc_thread)))
wake_up_process(p);
rcu_read_unlock();
}
-static inline struct bch_dev *dev_group_next(struct dev_group *devs,
- unsigned *iter)
-{
- struct bch_dev *ret = NULL;
+#define writepoint_for_each_ptr(_wp, _ob, _i) \
+ for ((_i) = 0; \
+ (_i) < (_wp)->nr_ptrs && ((_ob) = (_wp)->ptrs[_i], true); \
+ (_i)++)
- while (*iter < devs->nr &&
- !(ret = rcu_dereference_check(devs->d[*iter].dev,
- lockdep_is_held(&devs->lock))))
- (*iter)++;
+static inline struct write_point_specifier writepoint_hashed(unsigned long v)
+{
+ return (struct write_point_specifier) { .v = v | 1 };
+}
- return ret;
+static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
+{
+ return (struct write_point_specifier) { .v = (unsigned long) wp };
}
-#define group_for_each_dev(ca, devs, iter) \
- for ((iter) = 0; \
- ((ca) = dev_group_next((devs), &(iter))); \
- (iter)++)
+void bch2_recalc_capacity(struct bch_fs *);
-#define open_bucket_for_each_ptr(_ob, _ptr) \
- for ((_ptr) = (_ob)->ptrs; \
- (_ptr) < (_ob)->ptrs + (_ob)->nr_ptrs; \
- (_ptr)++)
+void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
+void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-void bch2_recalc_capacity(struct bch_fs *);
void bch2_dev_allocator_stop(struct bch_dev *);
int bch2_dev_allocator_start(struct bch_dev *);
+
+static inline void writepoint_init(struct write_point *wp,
+ enum bch_data_type type)
+{
+ mutex_init(&wp->lock);
+ wp->type = type;
+}
+
+int bch2_alloc_write(struct bch_fs *);
+int bch2_fs_allocator_start(struct bch_fs *);
void bch2_fs_allocator_init(struct bch_fs *);
-#endif /* _BCACHE_ALLOC_H */
+extern const struct bkey_ops bch2_bkey_alloc_ops;
+
+#endif /* _BCACHEFS_ALLOC_H */