-969fbff4ef3a75ae25ef7cca17dd4e028443bfc2
+fad6d13aa55f96e01cc6ff516cdfea53b2fc9eb1
n_ondisk = malloc(btree_bytes(c));
- bio = bio_alloc_bioset(GFP_NOIO,
- buf_pages(n_ondisk, btree_bytes(c)),
- &c->btree_bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
- bio->bi_opf = REQ_OP_READ|REQ_META;
+ bio = bio_alloc_bioset(ca->disk_sb.bdev,
+ buf_pages(n_ondisk, btree_bytes(c)),
+ REQ_OP_READ|REQ_META,
+ GFP_NOIO,
+ &c->btree_bio);
bio->bi_iter.bi_sector = pick.ptr.offset;
bch2_bio_map(bio, n_ondisk, btree_bytes(c));
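Throughout this patch the old two-step setup, bio_alloc_bioset() followed by bio_set_dev() and a bare bi_opf assignment, collapses into the new five-argument bio_alloc_bioset() that takes the target device and operation flags up front. A minimal sketch of the new convention follows; the helper and its arguments are invented for illustration, only the block-layer calls are real:

	#include <linux/bio.h>

	/* Hypothetical helper: bdev and opf are fixed at allocation time,
	 * so the bio can no longer be submitted half-initialized. */
	static struct bio *alloc_meta_read(struct block_device *bdev,
					   struct bio_set *bs, sector_t sector)
	{
		struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ|REQ_META,
						   GFP_NOIO, bs);

		bio->bi_iter.bi_sector = sector;	/* position still set by caller */
		return bio;
	}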
closure_init_stack(&cl);
- bio_init(&op.wbio.bio, bv, ARRAY_SIZE(bv));
+ bio_init(&op.wbio.bio, NULL, bv, ARRAY_SIZE(bv), 0);
bch2_bio_map(&op.wbio.bio, buf, len);
bch2_write_op_init(&op, c, bch2_opts_to_inode_opts(c->opts));
bch2_fs_stop(c);
return ret;
online:
- unsigned dev_idx;
- struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);
+	{
+		unsigned dev_idx;
+		struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);
-	for (i = 0; i < bch2_opts_nr; i++) {
-		if (!new_opt_strs.by_id[i])
-			continue;
+		for (i = 0; i < bch2_opts_nr; i++) {
+			if (!new_opt_strs.by_id[i])
+				continue;
-		char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
+			char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
-		write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
-		free(path);
+			write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
+			free(path);
+		}
}
return 0;
}
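The braces introduced after the online: label are load-bearing: in C17 and earlier a label must be followed by a statement, and a declaration is not one, so declaring dev_idx directly under the label would not compile. Wrapping the tail of the function in a compound statement fixes that; a hypothetical reduction:

	int demo(int x)
	{
		if (!x)
			goto fail;
		return 0;
	fail:
		{			/* a block is a statement... */
			int err = -1;	/* ...and may begin with declarations */
			return err;
		}
	}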
BIOSET_NEED_RESCUER = 1 << 1,
};
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *, unsigned,
+ unsigned, gfp_t, struct bio_set *);
extern void bio_put(struct bio *);
int bio_add_page(struct bio *, struct page *, unsigned, unsigned);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+struct bio *bio_alloc_clone(struct block_device *, struct bio *,
+ gfp_t, struct bio_set *);
struct bio *bio_kmalloc(gfp_t, unsigned int);
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
extern void bio_endio(struct bio *);
extern void bio_advance(struct bio *, unsigned);
-extern void bio_reset(struct bio *);
+extern void bio_reset(struct bio *, struct block_device *, unsigned);
void bio_chain(struct bio *, struct bio *);
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
atomic_inc(&bio->__bi_remaining);
}
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
-static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-}
-
-static inline void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs)
+static inline void bio_init(struct bio *bio,
+ struct block_device *bdev,
+ struct bio_vec *table,
+ unsigned short max_vecs,
+ unsigned int opf)
{
memset(bio, 0, sizeof(*bio));
+ bio->bi_bdev = bdev;
+ bio->bi_opf = opf;
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
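bio_init() gains the same two parameters. For code that embeds a bio with inline bio_vecs in a larger structure, as bcachefs does in several hunks below, initialization becomes a single call; struct my_write here is invented:

	#include <linux/bio.h>

	struct my_write {
		struct bio	bio;
		struct bio_vec	bvecs[4];	/* inline vec table for small I/Os */
	};

	static void my_write_init(struct my_write *w, struct block_device *bdev)
	{
		/* zeroes the bio, then records bdev, opf and the vec table */
		bio_init(&w->bio, bdev, w->bvecs, ARRAY_SIZE(w->bvecs), REQ_OP_WRITE);
	}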
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
+ const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
};
struct kobject {
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
atomic_t ref;
unsigned int state_initialized:1;
#define kobject_add(...) 0
-static inline void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+static inline void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
{
memset(kobj, 0, sizeof(*kobj));
static inline void kobject_cleanup(struct kobject *kobj)
{
- struct kobj_type *t = kobj->ktype;
+ const struct kobj_type *t = kobj->ktype;
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs)
umode_t mode;
};
+struct attribute_group {
+ struct attribute **attrs;
+};
+
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
struct bch_btree_ptr {
struct bch_val v;
- struct bch_extent_ptr start[0];
__u64 _data[0];
+ struct bch_extent_ptr start[];
} __attribute__((packed, aligned(8)));
struct bch_btree_ptr_v2 {
__le16 sectors_written;
__le16 flags;
struct bpos min_key;
- struct bch_extent_ptr start[0];
__u64 _data[0];
+ struct bch_extent_ptr start[];
} __attribute__((packed, aligned(8)));
LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
struct bch_extent {
struct bch_val v;
- union bch_extent_entry start[0];
__u64 _data[0];
+ union bch_extent_entry start[];
} __attribute__((packed, aligned(8)));
struct bch_reservation {
__u8 csum_type;
__u8 pad;
- struct bch_extent_ptr ptrs[0];
+ struct bch_extent_ptr ptrs[];
} __attribute__((packed, aligned(8)));
/* Reflink: */
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[0];
+ __u8 devs[];
} __attribute__((packed));
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[0];
+ struct bch_replicas_entry_v0 entries[];
} __attribute__((packed, aligned(8)));
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[0];
+ __u8 devs[];
} __attribute__((packed));
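The [0] → [] conversions replace GCC's zero-length-array extension with the standard C99 flexible array member. The layout is identical, the trailing member must still come last and adds nothing to sizeof, but [] lets modern compilers and FORTIFY_SOURCE bounds-check accesses instead of treating them as undefined-size. A compile-time check with a made-up struct:

	struct demo_entry {
		unsigned char	nr_devs;
		unsigned char	devs[];		/* flexible array member, must be last */
	};

	/* same size as the old "devs[0]" form: the trailing array is free */
	_Static_assert(sizeof(struct demo_entry) == 1, "flex member adds no size");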
#define replicas_entry_bytes(_i) \
{
struct pack_state out_s = pack_state_init(out_f, out);
struct unpack_state in_s = unpack_state_init(in_f, in);
+ u64 *w = out->_data;
unsigned i;
- out->_data[0] = 0;
+ *w = 0;
for (i = 0; i < BKEY_NR_FIELDS; i++)
if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
const struct bkey_format *format)
{
struct pack_state state = pack_state_init(format, out);
+ u64 *w = out->_data;
EBUG_ON((void *) in == (void *) out);
EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
EBUG_ON(in->format != KEY_FORMAT_CURRENT);
- out->_data[0] = 0;
+ *w = 0;
#define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
bkey_fields()
{
const struct bkey_format *f = &b->format;
struct pack_state state = pack_state_init(f, out);
+ u64 *w = out->_data;
#ifdef CONFIG_BCACHEFS_DEBUG
struct bpos orig = in;
#endif
* enough - we need to make sure to zero them out:
*/
for (i = 0; i < f->key_u64s; i++)
- out->_data[i] = 0;
+ w[i] = 0;
if (unlikely(in.snapshot <
le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
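The new u64 *w indirection in the bkey packing helpers is the pointer-laundering idiom: presumably a direct out->_data[0] = 0 store now trips -Warray-bounds/FORTIFY_SOURCE checks on the [0] member, while a store through a separately obtained pointer does not, and the generated code is unchanged. In isolation (struct name invented):

	struct packed_demo {
		unsigned long long _data[0];	/* real storage follows the struct */
	};

	static void zero_first_word(struct packed_demo *k)
	{
		unsigned long long *w = k->_data;	/* launder through a pointer */

		*w = 0;		/* fine; "k->_data[0] = 0" may warn */
	}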
bch_info(c, "retrying read");
ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
- bio_reset(bio);
- bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
+ bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
bio->bi_iter.bi_size = btree_bytes(c);
for (i = 0; i < ra->nr; i++) {
ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
- ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
- btree_bytes(c)),
+ ra->bio[i] = bio_alloc_bioset(NULL,
+ buf_pages(ra->buf[i], btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOFS,
&c->btree_bio);
}
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
rb->idx = i;
rb->pick = pick;
- rb->bio.bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
rb->bio.bi_iter.bi_sector = pick.ptr.offset;
rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
ca = bch_dev_bkey_exists(c, pick.ptr.dev);
- bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
- btree_bytes(c)),
+ bio = bio_alloc_bioset(NULL,
+ buf_pages(b->data, btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOIO,
&c->btree_bio);
rb = container_of(bio, struct btree_read_bio, bio);
rb->c = c;
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
rb->pick = pick;
INIT_WORK(&rb->work, btree_node_read_work);
- bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_end_io = btree_node_read_endio;
bch2_bio_map(bio, b->data, btree_bytes(c));
trace_btree_write(b, bytes_to_write, sectors_to_write);
- wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+ wbio = container_of(bio_alloc_bioset(NULL,
buf_pages(data, sectors_to_write << 9),
+ REQ_OP_WRITE|REQ_META,
+ GFP_NOIO,
&c->btree_bio),
struct btree_write_bio, wbio.bio);
wbio_init(&wbio->wbio.bio);
wbio->wbio.c = c;
wbio->wbio.used_mempool = used_mempool;
wbio->wbio.first_btree_write = !b->written;
- wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
wbio->wbio.bio.bi_end_io = btree_node_write_endio;
wbio->wbio.bio.bi_private = b;
static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
struct btree_path *src)
{
- unsigned i;
+ unsigned i, offset = offsetof(struct btree_path, pos);
- memcpy(&dst->pos, &src->pos,
- sizeof(struct btree_path) - offsetof(struct btree_path, pos));
+ memcpy((void *) dst + offset,
+ (void *) src + offset,
+ sizeof(struct btree_path) - offset);
for (i = 0; i < BTREE_MAX_DEPTH; i++)
if (btree_node_locked(dst, i))
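The same motivation is behind the memcpy()/memset() rewrites in this and the following hunks: taking the address of an interior member and copying past its end trips the compiler's object-size checking, whereas a byte offset from the struct base does not. Reduced to its essentials (struct demo is hypothetical):

	#include <stddef.h>
	#include <string.h>

	struct demo {
		int	skipped;	/* not copied */
		int	pos;		/* copy starts here */
		int	tail;
	};

	static void demo_copy_from_pos(struct demo *dst, const struct demo *src)
	{
		size_t offset = offsetof(struct demo, pos);

		memcpy((void *) dst + offset,
		       (const void *) src + offset,
		       sizeof(struct demo) - offset);
	}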
*/
void bch2_trans_begin(struct btree_trans *trans)
{
- struct btree_insert_entry *i;
struct btree_path *path;
- trans_for_each_update(trans, i)
- __btree_path_put(i->path, true);
+ bch2_trans_reset_updates(trans);
- memset(&trans->journal_res, 0, sizeof(trans->journal_res));
- trans->extra_journal_res = 0;
- trans->nr_updates = 0;
trans->mem_top = 0;
- trans->hooks = NULL;
- trans->extra_journal_entries.nr = 0;
-
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
- memset(&trans->fs_usage_deltas->memset_start, 0,
+ memset((void *) trans->fs_usage_deltas +
+ offsetof(struct replicas_delta_list, memset_start), 0,
(void *) &trans->fs_usage_deltas->memset_end -
(void *) &trans->fs_usage_deltas->memset_start);
}
(_i) < (_trans)->updates + (_trans)->nr_updates; \
(_i)++)
+static inline void bch2_trans_reset_updates(struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ bch2_path_put(trans, i->path, true);
+
+ trans->extra_journal_res = 0;
+ trans->nr_updates = 0;
+ trans->hooks = NULL;
+ trans->extra_journal_entries.nr = 0;
+}
+
#endif /* _BCACHEFS_BTREE_UPDATE_H */
if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
percpu_ref_put(&c->writes);
out_reset:
- trans_for_each_update(trans, i)
- bch2_path_put(trans, i->path, true);
-
- trans->extra_journal_res = 0;
- trans->nr_updates = 0;
- trans->hooks = NULL;
- trans->extra_journal_entries.nr = 0;
+ bch2_trans_reset_updates(trans);
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
- memset(&trans->fs_usage_deltas->memset_start, 0,
+ memset((void *) trans->fs_usage_deltas +
+ offsetof(struct replicas_delta_list, memset_start), 0,
(void *) &trans->fs_usage_deltas->memset_end -
(void *) &trans->fs_usage_deltas->memset_start);
}
n = (void *) d->d + d->used;
n->delta = sectors;
- memcpy(&n->r, r, replicas_entry_bytes(r));
+ memcpy((void *) n + offsetof(struct replicas_delta, r),
+ r, replicas_entry_bytes(r));
bch2_replicas_entry_sort(&n->r);
d->used += b;
}
return 0;
}
-int bch2_fs_counters_init(struct bch_fs *c)
+void bch2_fs_counters_exit(struct bch_fs *c)
{
- int ret = 0;
+ free_percpu(c->counters);
+}
+
+int bch2_fs_counters_init(struct bch_fs *c)
+{
c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
-
if (!c->counters)
return -ENOMEM;
- ret = bch2_sb_counters_to_cpu(c);
-
- return ret;
+ return bch2_sb_counters_to_cpu(c);
}
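Splitting out bch2_fs_counters_exit() gives the counters the usual init/exit pairing, and because free_percpu(NULL) is a no-op the exit path is safe to call unconditionally on teardown, even after a partial init. A sketch of the intended usage (demo_init is invented):

	static int demo_init(struct bch_fs *c)
	{
		int ret = bch2_fs_counters_init(c);

		if (ret)
			bch2_fs_counters_exit(c);	/* free_percpu(NULL) is a no-op */
		return ret;
	}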
const struct bch_sb_field_ops bch_sb_field_ops_counters = {
#include "super-io.h"
-int bch2_sb_counters_to_cpu(struct bch_fs *c);
-int bch2_sb_counters_from_cpu(struct bch_fs *c);
-
-int bch2_fs_counters_init(struct bch_fs *c);
+int bch2_sb_counters_to_cpu(struct bch_fs *);
+int bch2_sb_counters_from_cpu(struct bch_fs *);
+
+void bch2_fs_counters_exit(struct bch_fs *);
+int bch2_fs_counters_init(struct bch_fs *);
extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
if (!bch2_dev_get_ioref(ca, READ))
return false;
- bio = bio_alloc_bioset(GFP_NOIO,
- buf_pages(n_sorted, btree_bytes(c)),
- &c->btree_bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
- bio->bi_opf = REQ_OP_READ|REQ_META;
+ bio = bio_alloc_bioset(ca->disk_sb.bdev,
+ buf_pages(n_sorted, btree_bytes(c)),
+ REQ_OP_READ|REQ_META,
+ GFP_NOIO,
+ &c->btree_bio);
bio->bi_iter.bi_sector = pick.ptr.offset;
bch2_bio_map(bio, n_sorted, btree_bytes(c));
nr_iovecs << PAGE_SHIFT);
struct ec_bio *ec_bio;
- ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
+ ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
+ nr_iovecs,
+ rw,
+ GFP_KERNEL,
&c->ec_bioset),
struct ec_bio, bio);
ec_bio->buf = buf;
ec_bio->idx = idx;
- bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
- bio_set_op_attrs(&ec_bio->bio, rw, 0);
-
ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
ec_bio->bio.bi_end_io = ec_block_endio;
ec_bio->bio.bi_private = cl;
return ret;
}
-void bch2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- if (offset || length < PAGE_SIZE)
+ if (offset || length < folio_size(folio))
return;
- bch2_clear_page_bits(page);
+ bch2_clear_page_bits(&folio->page);
}
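bch2_invalidatepage() becomes bch2_invalidate_folio() as part of the folio conversion, and the full-invalidation test now asks folio_size() rather than assuming PAGE_SIZE, since a folio may span several pages. The general shape of such a hook, sketched with generic pagemap helpers rather than bcachefs internals:

	#include <linux/pagemap.h>

	/* hypothetical hook: drop private state only on whole-folio invalidation */
	static void demo_invalidate_folio(struct folio *folio, size_t offset,
					  size_t length)
	{
		if (offset == 0 && length == folio_size(folio))
			folio_detach_private(folio);
	}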
int bch2_releasepage(struct page *page, gfp_t gfp_mask)
readpages_iter.idx,
BIO_MAX_VECS);
struct bch_read_bio *rbio =
- rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+ rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+ GFP_NOFS, &c->bio_read),
opts);
readpages_iter.idx++;
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
rbio->bio.bi_end_io = bch2_readpages_end_io;
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct bch_read_bio *rbio;
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read), opts);
rbio->bio.bi_end_io = bch2_readpages_end_io;
__bchfs_readpage(c, rbio, inode_inum(inode), page);
int ret;
DECLARE_COMPLETION_ONSTACK(done);
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
io_opts(c, &inode->ei_inode));
rbio->bio.bi_private = &done;
rbio->bio.bi_end_io = bch2_read_single_page_end_io;
{
struct bch_write_op *op;
- w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
+ w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+ REQ_OP_WRITE,
+ GFP_NOFS,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
iter->count -= shorten;
- bio = bio_alloc_bioset(GFP_KERNEL,
+ bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;
goto start;
while (iter->count) {
- bio = bio_alloc_bioset(GFP_KERNEL,
+ bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
if (!dio->iter.count)
break;
- bio_reset(bio);
+ bio_reset(bio, NULL, REQ_OP_WRITE);
reinit_completion(&dio->done);
}
locked = false;
}
- bio = bio_alloc_bioset(GFP_KERNEL,
+ bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_WRITE,
+ GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
init_completion(&dio->done);
vm_fault_t bch2_page_fault(struct vm_fault *);
vm_fault_t bch2_page_mkwrite(struct vm_fault *);
-void bch2_invalidatepage(struct page *, unsigned int, unsigned int);
+void bch2_invalidate_folio(struct folio *, size_t, size_t);
int bch2_releasepage(struct page *, gfp_t);
int bch2_migrate_page(struct address_space *, struct page *,
struct page *, enum migrate_mode);
.readpage = bch2_readpage,
.writepages = bch2_writepages,
.readahead = bch2_readahead,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .dirty_folio = filemap_dirty_folio,
.write_begin = bch2_write_begin,
.write_end = bch2_write_end,
- .invalidatepage = bch2_invalidatepage,
+ .invalidate_folio = bch2_invalidate_folio,
.releasepage = bch2_releasepage,
.direct_IO = noop_direct_IO,
#ifdef CONFIG_MIGRATION
#define x(_name, _bits) \
if (fieldnr++ == INODE_NR_FIELDS(inode.v)) { \
- memset(&unpacked->_name, 0, \
- sizeof(*unpacked) - \
- offsetof(struct bch_inode_unpacked, _name)); \
+ unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
+ memset((void *) unpacked + offset, 0, \
+ sizeof(*unpacked) - offset); \
return 0; \
} \
\
ca = bch_dev_bkey_exists(c, ptr->dev);
if (to_entry(ptr + 1) < ptrs.end) {
- n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
- &ca->replica_set));
+ n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
+ GFP_NOIO, &ca->replica_set));
n->bio.bi_end_io = wbio->bio.bi_end_io;
n->bio.bi_private = wbio->bio.bi_private;
pages = min(pages, BIO_MAX_VECS);
- bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
+ bio = bio_alloc_bioset(NULL, pages, 0,
+ GFP_NOIO, &c->bio_write);
wbio = wbio_init(bio);
wbio->put_bio = true;
/* copy WRITE_SYNC flag */
goto err;
rbio_init(&(*rbio)->bio, opts);
- bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
+ bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
GFP_NOIO))
goto err;
bio = &op->write.op.wbio.bio;
- bio_init(bio, bio->bi_inline_vecs, pages);
+ bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
ret = bch2_migrate_write_init(c, &op->write,
writepoint_hashed((unsigned long) current),
} else if (bounce) {
unsigned sectors = pick.crc.compressed_size;
- rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
+ rbio = rbio_init(bio_alloc_bioset(NULL,
DIV_ROUND_UP(sectors, PAGE_SECTORS),
+ 0,
+ GFP_NOIO,
&c->bio_read_split),
orig->opts);
* from the whole bio, in which case we don't want to retry and
* lose the error)
*/
- rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
- &c->bio_read_split),
+ rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+ &c->bio_read_split),
orig->opts);
rbio->bio.bi_iter = iter;
rbio->split = true;
sectors);
bio = ca->journal.bio;
- bio_reset(bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = ptr->offset;
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
- bio->bi_opf = REQ_OP_WRITE|REQ_SYNC|REQ_META;
BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
ca->prev_journal_sector = bio->bi_iter.bi_sector;
percpu_ref_get(&ca->io_ref);
bio = ca->journal.bio;
- bio_reset(bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
- bio->bi_opf = REQ_OP_FLUSH;
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
closure_bio_submit(bio, cl);
io->read_sectors = k.k->size;
io->write_sectors = k.k->size;
- bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
+ bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
bio_set_prio(&io->write.op.wbio.bio,
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
io->rbio.c = c;
io->rbio.opts = io_opts;
- bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
+ bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
io->rbio.bio.bi_vcnt = pages;
bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
io->rbio.bio.bi_iter.bi_size = sectors << 9;
size_t bytes;
int ret;
reread:
- bio_reset(sb->bio);
- bio_set_dev(sb->bio, sb->bdev);
+ bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
sb->bio->bi_iter.bi_sector = offset;
- bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
ret = submit_bio_wait(sb->bio);
* Error reading primary superblock - read location of backup
* superblocks:
*/
- bio_reset(sb->bio);
- bio_set_dev(sb->bio, sb->bdev);
+ bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
- bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
/*
* use sb buffer to read layout, since sb buffer is page aligned but
* layout won't be:
struct bch_sb *sb = ca->disk_sb.sb;
struct bio *bio = ca->disk_sb.bio;
- bio_reset(bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = le64_to_cpu(sb->layout.sb_offset[0]);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
null_nonce(), sb);
- bio_reset(bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = le64_to_cpu(sb->offset);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
bch2_bio_map(bio, sb,
roundup((size_t) vstruct_bytes(sb),
bdev_logical_block_size(ca->disk_sb.bdev)));
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
#define KTYPE(type) \
-struct kobj_type type ## _ktype = { \
+static const struct attribute_group type ## _group = { \
+ .attrs = type ## _files \
+}; \
+ \
+static const struct attribute_group *type ## _groups[] = { \
+ &type ## _group, \
+ NULL \
+}; \
+ \
+static const struct kobj_type type ## _ktype = { \
.release = type ## _release, \
.sysfs_ops = &type ## _sysfs_ops, \
- .default_attrs = type ## _files \
+ .default_groups = type ## _groups \
}
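With default_attrs gone from struct kobj_type, every attribute list has to be wrapped in an attribute_group plus a NULL-terminated array of groups, which is what the extra boilerplate in the macro provides. Expanded by hand, KTYPE(bch2_fs) now yields:

	static const struct attribute_group bch2_fs_group = {
		.attrs = bch2_fs_files
	};

	static const struct attribute_group *bch2_fs_groups[] = {
		&bch2_fs_group,
		NULL
	};

	static const struct kobj_type bch2_fs_ktype = {
		.release	= bch2_fs_release,
		.sysfs_ops	= &bch2_fs_sysfs_ops,
		.default_groups	= bch2_fs_groups
	};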
static void bch2_fs_release(struct kobject *);
{
}
-static KTYPE(bch2_fs);
-static KTYPE(bch2_fs_counters);
-static KTYPE(bch2_fs_internal);
-static KTYPE(bch2_fs_opts_dir);
-static KTYPE(bch2_fs_time_stats);
-static KTYPE(bch2_dev);
+KTYPE(bch2_fs);
+KTYPE(bch2_fs_counters);
+KTYPE(bch2_fs_internal);
+KTYPE(bch2_fs_opts_dir);
+KTYPE(bch2_fs_time_stats);
+KTYPE(bch2_dev);
static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
+ bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
bch2_fs_quota_exit(c);
bch2_fs_fsio_exit(c);
bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
bch2_fs_btree_iter_init(c) ?:
bch2_fs_btree_interior_update_init(c) ?:
- bch2_fs_buckets_waiting_for_journal_init(c);
+ bch2_fs_buckets_waiting_for_journal_init(c) ?:
bch2_fs_subvolumes_init(c) ?:
bch2_fs_io_init(c) ?:
bch2_fs_encryption_init(c) ?:
{
if (buf->last_newline + buf->indent == buf->pos) {
buf->pos -= spaces;
- buf->buf[buf->pos] = 0;
+ buf->buf[buf->pos] = '\0';
}
buf->indent -= spaces;
}
}
}
-void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
- /*
- * most users will be overriding ->bi_bdev with a new target,
- * so we don't set nor calculate new physical/hw segment counts here
- */
- bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
- bio->bi_opf = bio_src->bi_opf;
+ bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_iter = bio_src->bi_iter;
- bio->bi_io_vec = bio_src->bi_io_vec;
+ return 0;
}
-struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs)
{
- struct bio *b;
+ struct bio *bio;
- b = bio_alloc_bioset(gfp_mask, 0, bs);
- if (!b)
+ bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+ if (!bio)
return NULL;
- __bio_clone_fast(b, bio);
- return b;
+ if (__bio_clone(bio, bio_src, gfp) < 0) {
+ bio_put(bio);
+ return NULL;
+ }
+ bio->bi_io_vec = bio_src->bi_io_vec;
+
+ return bio;
}
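bio_alloc_clone() supersedes both bio_clone_fast() and bio_clone_bioset(). Note that the clone borrows bi_io_vec from the source instead of copying it, so it must not outlive the bio it was cloned from. A hypothetical caller, mirroring the replica-write hunk above:

	static struct bio *demo_clone(struct bio *src, struct bio_set *bs)
	{
		/* the clone inherits src->bi_opf via bio_alloc_bioset() */
		struct bio *clone = bio_alloc_clone(src->bi_bdev, src, GFP_NOIO, bs);

		if (!clone)
			return NULL;
		clone->bi_end_io  = src->bi_end_io;	/* complete like the original */
		clone->bi_private = src->bi_private;
		return clone;
	}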
struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
- /*
- * Discards need a mutable bio_vec to accommodate the payload
- * required by the DSM TRIM and UNMAP commands.
- */
- if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
- split = bio_clone_bioset(bio, gfp, bs);
- else
- split = bio_clone_fast(bio, gfp, bs);
-
+ split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
if (!split)
return NULL;
bio->bi_end_io(bio);
}
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
memset(bio, 0, BIO_RESET_BYTES);
- bio->bi_flags = flags;
+ bio->bi_bdev = bdev;
+ bio->bi_opf = opf;
+ bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
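bio_reset() now re-arms the bio with a device and opf in the same call, which is what lets the journal and superblock paths above drop their separate bio_set_dev()/bi_opf/bio_set_op_attrs() lines. Reusing a long-lived bio for a retry then reduces to this sketch (helper invented; bch2_bio_map() is from the patched codebase):

	static int demo_retry_read(struct bio *bio, struct block_device *bdev,
				   sector_t sector, void *buf, size_t len)
	{
		bio_reset(bio, bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector = sector;
		bch2_bio_map(bio, buf, len);
		return submit_bio_wait(bio);
	}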
sizeof(struct bio_vec) * nr_iovecs, gfp_mask);
if (unlikely(!bio))
return NULL;
- bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+ bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 0);
bio->bi_pool = NULL;
return bio;
}
return mempool_alloc(pool, gfp_mask);
}
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(struct block_device *bdev,
+ unsigned nr_iovecs,
+ unsigned opf,
+ gfp_t gfp_mask,
+ struct bio_set *bs)
{
struct bio *bio;
void *p;
if (unlikely(!bvl))
goto err_free;
- bio_init(bio, bvl, nr_iovecs);
+ bio_init(bio, bdev, bvl, nr_iovecs, opf);
} else if (nr_iovecs) {
- bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+ bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
} else {
- bio_init(bio, NULL, 0);
+ bio_init(bio, bdev, NULL, 0, opf);
}
bio->bi_pool = bs;
return NULL;
}
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
-{
- struct bvec_iter iter;
- struct bio_vec bv;
- struct bio *bio;
-
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
-
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
- }
-
- return bio;
-}
-
void bioset_exit(struct bio_set *bs)
{
mempool_exit(&bs->bio_pool);