4 #include "btree_iter.h"
14 #include <linux/freezer.h>
15 #include <linux/kthread.h>
16 #include <trace/events/bcachefs.h>
/*
 * tiering_pred() - filter callback: should extent @e be copied to @tier?
 *
 * NOTE(review): this chunk of the file is elided; the bodies of the two
 * early-exit paths and the replicas accounting are not visible here.
 */
18 static bool tiering_pred(void *arg, struct bkey_s_c_extent e)
20 struct bch_tier *tier = arg;
/* Recover the fs from the tier's position inside c->tiers[] */
21 struct bch_fs *c = container_of(tier, struct bch_fs, tiers[tier->idx]);
22 const struct bch_extent_ptr *ptr;
23 unsigned replicas = 0;
25 /* Make sure we have room to add a new pointer: */
26 if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
27 BKEY_EXTENT_VAL_U64s_MAX)
/*
 * Count existing pointers already on this tier (or a higher-index one);
 * the increment of @replicas is in an elided line -- confirm against
 * the full source.
 */
30 extent_for_each_ptr(e, ptr)
31 if (c->devs[ptr->dev]->mi.tier >= tier->idx)
/* Migrate only while the extent is below the requested replica count */
34 return replicas < c->opts.data_replicas;
/*
 * bch2_tiering_thread() - per-tier background data migration kthread.
 *
 * Waits until tiering is enabled and the tier has member devices, then
 * throttles against the write I/O clock and moves data onto this tier.
 *
 * NOTE(review): this view is heavily elided -- loop bodies, RCU
 * lock/unlock and several statements are missing; comments below only
 * describe what the visible lines establish.
 */
37 static int bch2_tiering_thread(void *arg)
39 struct bch_tier *tier = arg;
40 struct bch_fs *c = container_of(tier, struct bch_fs, tiers[tier->idx]);
/* Tiering progress is paced by the filesystem's write I/O clock */
41 struct io_clock *clock = &c->io_clock[WRITE];
43 u64 tier_capacity, available_sectors, keys_moved, sectors_moved;
45 unsigned i, nr_devices;
49 while (!kthread_should_stop()) {
/* Sleep (freezably) until tiering is on and the tier has devices */
50 if (kthread_wait_freezable(c->tiering_enabled &&
51 (nr_devices = dev_mask_nr(&tier->devs))))
55 struct bch_tier *faster_tier;
57 last = atomic_long_read(&clock->now);
/*
 * Sum up the capacity and free space of the faster tiers that feed
 * this one (iteration body partially elided).
 */
59 tier_capacity = available_sectors = 0;
60 for (faster_tier = c->tiers;
64 for_each_member_device_rcu(ca, c, i,
72 dev_buckets_available(c, ca));
/*
 * Presumably: once the faster tiers are more than half full, stop
 * waiting and start migrating; otherwise block on the I/O clock.
 * The branch bodies are elided -- confirm against the full source.
 */
77 if (available_sectors < (tier_capacity >> 1))
80 bch2_kthread_io_clock_wait(clock,
83 (tier_capacity >> 1));
84 if (kthread_should_stop())
/* Move data onto this tier, rate-limited by the tier's pd controller */
88 bch2_move_data(c, &tier->pd.rate,
89 SECTORS_IN_FLIGHT_PER_DEVICE * nr_devices,
91 writepoint_ptr(&tier->wp),
/*
 * __bch2_tiering_stop() - stop a single tier's migration thread.
 *
 * NOTE(review): elided lines here presumably guard against a NULL
 * tier->migrate before calling kthread_stop() -- confirm against the
 * full source.
 */
102 static void __bch2_tiering_stop(struct bch_tier *tier)
/* Open the throttle fully so any final work isn't rate-limited */
104 tier->pd.rate.rate = UINT_MAX;
105 bch2_ratelimit_reset(&tier->pd.rate);
108 kthread_stop(tier->migrate);
/* Clear so a later __bch2_tiering_start() creates a fresh thread */
110 tier->migrate = NULL;
/* bch2_tiering_stop() - stop the migration threads of every tier. */
113 void bch2_tiering_stop(struct bch_fs *c)
115 struct bch_tier *tier;
117 for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++)
118 __bch2_tiering_stop(tier);
/*
 * __bch2_tiering_start() - ensure a tier's migration kthread exists and
 * is running.
 *
 * NOTE(review): elided lines presumably check kthread_create() for
 * IS_ERR() and assign @p to tier->migrate before the wake-up, and
 * return an error code on failure -- confirm against the full source.
 */
121 static int __bch2_tiering_start(struct bch_tier *tier)
/* Only create a thread if one isn't already running */
123 if (!tier->migrate) {
124 struct task_struct *p =
125 kthread_create(bch2_tiering_thread, tier,
126 "bch_tier[%u]", tier->idx);
133 wake_up_process(tier->migrate);
/*
 * bch2_tiering_start() - start migration threads for the tiers that can
 * receive data (those with a populated faster tier above them); stop the
 * threads of tiers that cannot.
 *
 * NOTE(review): early-return/continue bodies are elided in this view.
 */
137 int bch2_tiering_start(struct bch_fs *c)
139 struct bch_tier *tier;
140 bool have_faster_tier = false;
/* In nochanges mode no data may be moved (return body elided) */
142 if (c->opts.nochanges)
145 for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++) {
/* Tiers with no member devices are skipped entirely */
146 if (!dev_mask_nr(&tier->devs))
/* Only a tier below some populated faster tier runs a migrate thread */
149 if (have_faster_tier) {
150 int ret = __bch2_tiering_start(tier);
154 __bch2_tiering_stop(tier);
/* This tier is populated, so every later tier has a faster source */
157 have_faster_tier = true;
/*
 * bch2_fs_tiering_init() - initialize per-tier state at fs-init time.
 *
 * NOTE(review): the definition continues past the visible end of this
 * chunk; only the pd-controller init of each tier is visible here.
 */
163 void bch2_fs_tiering_init(struct bch_fs *c)
167 for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
169 bch2_pd_controller_init(&c->tiers[i].pd);