#include "bcachefs.h"
#include "alloc.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "super-io.h"
#include "tier.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcachefs.h>
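
/*
 * Returns true if the extent should gain a replica on this tier: it has
 * fewer than data_replicas pointers on this tier (or slower tiers), and
 * the key value has room for another pointer.
 */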
static bool __tiering_pred(struct bch_fs *c, struct bch_tier *tier,
			   struct bkey_s_c_extent e)
{
	const struct bch_extent_ptr *ptr;
	unsigned replicas = 0;

	/* Make sure we have room to add a new pointer: */
	if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
	    BKEY_EXTENT_VAL_U64s_MAX)
		return false;

	extent_for_each_ptr(e, ptr)
		if (bch_dev_bkey_exists(c, ptr->dev)->mi.tier >= tier->idx)
			replicas++;

	return replicas < c->opts.data_replicas;
}
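
/* Callback for bch2_move_data(): add a replica on this tier, or skip. */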
static enum data_cmd tiering_pred(struct bch_fs *c, void *arg,
				  enum bkey_type type,
				  struct bkey_s_c_extent e,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_tier *tier = arg;

	if (!__tiering_pred(c, tier, e))
		return DATA_SKIP;

	data_opts->btree_insert_flags = 0;
	return DATA_ADD_REPLICAS;
}
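
/*
 * Per-tier background thread: watches how full the faster tiers are and,
 * once they pass the halfway mark, copies data down into this tier.
 */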
static int bch2_tiering_thread(void *arg)
{
	struct bch_tier *tier = arg;
	struct bch_fs *c = container_of(tier, struct bch_fs, tiers[tier->idx]);
	struct io_clock *clock = &c->io_clock[WRITE];
	struct bch_dev *ca;
	struct bch_move_stats move_stats;
	u64 tier_capacity, available_sectors;
	unsigned long last;
	unsigned i, nr_devices;

	memset(&move_stats, 0, sizeof(move_stats));
	set_freezable();
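
	/* Sleep (freezably) until tiering is enabled and this tier has devices: */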
	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->tiering_enabled &&
					   (nr_devices = dev_mask_nr(&tier->devs))))
			break;

		while (1) {
			struct bch_tier *faster_tier;

			last = atomic_long_read(&clock->now);

			tier_capacity = available_sectors = 0;
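			/* Sum total and free capacity of every faster tier: */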
			for (faster_tier = c->tiers;
			     faster_tier != tier;
			     faster_tier++) {
				rcu_read_lock();
				for_each_member_device_rcu(ca, c, i,
							   &faster_tier->devs) {
					tier_capacity +=
						bucket_to_sector(ca,
							ca->mi.nbuckets -
							ca->mi.first_bucket);
					available_sectors +=
						bucket_to_sector(ca,
							dev_buckets_available(c, ca));
				}
				rcu_read_unlock();
			}
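
			/*
			 * Don't start migrating until the faster tiers are
			 * at least half full; until then, wait on the IO
			 * clock so we wake up after enough writes have
			 * happened that they could be.
			 */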
			if (available_sectors < (tier_capacity >> 1))
				break;

			bch2_kthread_io_clock_wait(clock,
						   last +
						   available_sectors -
						   (tier_capacity >> 1));
			if (kthread_should_stop())
				return 0;
		}
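
		/*
		 * Walk the keyspace and add replicas of qualifying extents
		 * to this tier, rate limited by the tier's pd controller:
		 */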
		bch2_move_data(c, &tier->pd.rate,
			       SECTORS_IN_FLIGHT_PER_DEVICE * nr_devices,
			       &tier->devs,
			       writepoint_ptr(&tier->wp),
			       POS_MIN, POS_MAX,
			       tiering_pred, tier,
			       &move_stats);
	}

	return 0;
}
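
/*
 * Tear down a tier's migrate thread, if it exists.
 */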
static void __bch2_tiering_stop(struct bch_tier *tier)
{
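	/*
	 * Crank the rate limiter wide open so the thread isn't left
	 * sleeping on a throttle while we wait for it to exit:
	 */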
	tier->pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&tier->pd.rate);

	if (tier->migrate)
		kthread_stop(tier->migrate);

	tier->migrate = NULL;
}

void bch2_tiering_stop(struct bch_fs *c)
{
	struct bch_tier *tier;

	for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++)
		__bch2_tiering_stop(tier);
}
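
/*
 * Create (if necessary) and kick a tier's migrate thread.
 */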
static int __bch2_tiering_start(struct bch_tier *tier)
{
	if (!tier->migrate) {
		struct task_struct *p =
			kthread_create(bch2_tiering_thread, tier,
				       "bch_tier[%u]", tier->idx);

		if (IS_ERR(p))
			return PTR_ERR(p);

		tier->migrate = p;
	}

	wake_up_process(tier->migrate);
	return 0;
}
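
/*
 * Start migrate threads for every populated tier except the fastest one:
 * nothing migrates into the top tier, so its thread is stopped instead.
 */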
int bch2_tiering_start(struct bch_fs *c)
{
	struct bch_tier *tier;
	bool have_faster_tier = false;

	if (c->opts.nochanges)
		return 0;

	for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++) {
		if (!dev_mask_nr(&tier->devs))
			continue;

		if (have_faster_tier) {
			int ret = __bch2_tiering_start(tier);
			if (ret)
				return ret;
		} else {
			__bch2_tiering_stop(tier);
		}

		have_faster_tier = true;
	}

	return 0;
}
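
/*
 * At fs init time, set each tier's index and initialize its pd controller.
 */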
void bch2_fs_tiering_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
		c->tiers[i].idx = i;
		bch2_pd_controller_init(&c->tiers[i].pd);
	}
}