btrfs: introduce btrfs_bio_for_each_block_all() helper

Currently, if we want to iterate over all blocks inside a bio, we do
something like this:

	bio_for_each_segment_all(bvec, bio, iter_all) {
		for (off = 0; off < bvec->bv_len; off += sectorsize) {
			/* Iterate blocks using bv + off */
		}
	}

That's fine for now, but it will not handle the future bs > ps (block
size > page size) cases, as bio_for_each_segment_all() is a single-page
iterator and will always return a bvec that is no larger than a page.

But for bs > ps cases we need a full folio (which covers at least one
block) so that we can work on a whole block at a time. For example,
with a 16K block size on 4K pages, one block spans four single-page
bvecs, so the loop above can never see a complete block in one bvec.

To address this problem and handle future bs > ps cases better:

- Introduce a helper btrfs_bio_for_each_block_all()
  This helper creates a local bvec_iter sized to the full target bio,
  then for each block grabs the physical address of the current
  location and advances the iterator by one block size.

- Use btrfs_bio_for_each_block_all() to replace the existing call
  sites, including:

  * set_bio_pages_uptodate() in raid56
  * verify_bio_data_sectors() in raid56

  Both call sites become much easier to read; see the usage sketch
  below.
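
As an illustration (a sketch, not part of the patch; fs_info and the
per-block handler are assumptions for this example), the helper
collapses the nested page-then-block loops above into a single loop
over blocks:

	phys_addr_t paddr;

	btrfs_bio_for_each_block_all(paddr, bio, fs_info->sectorsize) {
		/* @paddr is the physical address of the current block. */
		process_block(paddr);	/* hypothetical per-block handler */
	}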

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 7425a28940
parent 9afc617265
Author:    Qu Wenruo
Date:      2025-09-02 16:45:02 +09:30
Committer: David Sterba

2 changed files with 44 additions and 29 deletions


@@ -45,6 +45,30 @@ static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
 	     (paddr = bio_iter_phys((bio), (iter)), 1);			\
 	     bio_advance_iter_single((bio), (iter), (blocksize)))
 
+/* Initialize a bvec_iter to the size of the specified bio. */
+static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
+{
+	struct bio_vec *bvec;
+	u32 bio_size = 0;
+	int i;
+
+	bio_for_each_bvec_all(bvec, bio, i)
+		bio_size += bvec->bv_len;
+
+	return (struct bvec_iter) {
+		.bi_sector = 0,
+		.bi_size = bio_size,
+		.bi_idx = 0,
+		.bi_bvec_done = 0,
+	};
+}
+
+#define btrfs_bio_for_each_block_all(paddr, bio, blocksize)		\
+	for (struct bvec_iter iter = init_bvec_iter_for_bio(bio);	\
+	     (iter).bi_size &&						\
+	     (paddr = bio_iter_phys((bio), &(iter)), 1);		\
+	     bio_advance_iter_single((bio), &(iter), (blocksize)))
+
 static inline void cond_wake_up(struct wait_queue_head *wq)
 {
 	/*
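
For context, bio_iter_phys() in the hunk header above resolves the
iterator position to a physical address. A minimal sketch of how such
a helper can be composed from existing bvec primitives (an assumption
for illustration, not necessarily the actual definition):

	static inline phys_addr_t bio_iter_phys(struct bio *bio,
						struct bvec_iter *iter)
	{
		/* Current single-page segment at the iterator position. */
		struct bio_vec bv = bio_iter_iovec(bio, *iter);

		/* Physical base of that segment's page plus its offset. */
		return bvec_phys(&bv);
	}

Note that init_bvec_iter_for_bio() recomputes the size by summing
bv_len over all bvecs rather than reusing bio->bi_iter, since by
completion time the bio's own iterator has typically been advanced to
zero; like the existing _all iterators, this is only safe for the
bio's owner, hence the ASSERT(!bio_flagged(bio, BIO_CLONED)) at the
call sites below.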


@@ -1510,23 +1510,18 @@ static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
  */
 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
 {
-	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
+	const u32 blocksize = rbio->bioc->fs_info->sectorsize;
+	phys_addr_t paddr;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
 
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct sector_ptr *sector;
-		phys_addr_t paddr = bvec_phys(bvec);
-
-		for (u32 off = 0; off < bvec->bv_len; off += sectorsize) {
-			sector = find_stripe_sector(rbio, paddr + off);
-			ASSERT(sector);
-			if (sector)
-				sector->uptodate = 1;
-		}
-	}
+	btrfs_bio_for_each_block_all(paddr, bio, blocksize) {
+		struct sector_ptr *sector = find_stripe_sector(rbio, paddr);
+
+		ASSERT(sector);
+		if (sector)
+			sector->uptodate = 1;
+	}
 }
 
 static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
@@ -1572,8 +1567,7 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
 {
 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
 	int total_sector_nr = get_bio_sector_nr(rbio, bio);
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
+	phys_addr_t paddr;
 
 	/* No data csum for the whole stripe, no need to verify. */
 	if (!rbio->csum_bitmap || !rbio->csum_buf)
@@ -1583,23 +1577,20 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
 	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
 		return;
 
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		for (u32 off = 0; off < bvec->bv_len;
-		     off += fs_info->sectorsize, total_sector_nr++) {
-			u8 csum_buf[BTRFS_CSUM_SIZE];
-			u8 *expected_csum = rbio->csum_buf +
-					    total_sector_nr * fs_info->csum_size;
-			int ret;
-
-			/* No csum for this sector, skip to the next sector. */
-			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
-				continue;
-
-			ret = btrfs_check_block_csum(fs_info, bvec_phys(bvec) + off,
-						     csum_buf, expected_csum);
-			if (ret < 0)
-				set_bit(total_sector_nr, rbio->error_bitmap);
-		}
+	btrfs_bio_for_each_block_all(paddr, bio, fs_info->sectorsize) {
+		u8 csum_buf[BTRFS_CSUM_SIZE];
+		u8 *expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size;
+		int ret;
+
+		/* No csum for this sector, skip to the next sector. */
+		if (!test_bit(total_sector_nr, rbio->csum_bitmap))
+			continue;
+
+		ret = btrfs_check_block_csum(fs_info, paddr,
+					     csum_buf, expected_csum);
+		if (ret < 0)
+			set_bit(total_sector_nr, rbio->error_bitmap);
+		total_sector_nr++;
 	}
 }
 