btrfs: compression: migrate compression/decompression paths to folios
For both the compression and decompression paths we always require a
"struct page **pages" array and an "unsigned long nr_pages" count. This
involves quite a few parts of the btrfs compression code:

- All the compression entry points

- compressed_bio structure
  This affects both compression and decompression.

- async_extent structure

Unfortunately, with all of those parts involved there is no good way to
split the conversion into smaller patches that still compile, so do the
whole conversion in one go.

Please note this is a direct page -> folio conversion; the page-sized
folio requirement is not changed yet.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style fixups ]
Signed-off-by: David Sterba <dsterba@suse.com>
adam900710 authored and kdave committed May 7, 2024
1 parent 11e03f2 commit 400b172
Showing 6 changed files with 251 additions and 255 deletions.
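Before the diff, a minimal illustrative sketch (not part of the commit) of what the direct page -> folio conversion means in practice: because no large folios are used yet, each entry of a "struct page **" array corresponds to exactly one order-0 folio, so the two representations map 1:1 through the existing page_folio()/folio_page() helpers. The pages_to_folios()/folios_to_pages() helpers below are hypothetical and only illustrate that mapping.

#include <linux/mm.h>	/* page_folio(), folio_page() */

/*
 * Illustrative sketch only, not taken from the commit: with a direct
 * page -> folio conversion (order-0 folios only), a page array and a
 * folio array describe the same memory and convert 1:1 either way.
 */
static void pages_to_folios(struct page **pages, struct folio **folios,
			    unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++)
		folios[i] = page_folio(pages[i]);	/* containing folio of each page */
}

static void folios_to_pages(struct folio **folios, struct page **pages,
			    unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++)
		pages[i] = folio_page(folios[i], 0);	/* sole page of an order-0 folio */
}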
90 changes: 45 additions & 45 deletions fs/btrfs/compression.c
@@ -90,20 +90,20 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
}

static int compression_compress_pages(int type, struct list_head *ws,
struct address_space *mapping, u64 start, struct page **pages,
unsigned long *out_pages, unsigned long *total_in,
unsigned long *total_out)
struct address_space *mapping, u64 start,
struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB:
return zlib_compress_pages(ws, mapping, start, pages,
out_pages, total_in, total_out);
return zlib_compress_folios(ws, mapping, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_LZO:
return lzo_compress_pages(ws, mapping, start, pages,
out_pages, total_in, total_out);
return lzo_compress_folios(ws, mapping, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_ZSTD:
return zstd_compress_pages(ws, mapping, start, pages,
out_pages, total_in, total_out);
return zstd_compress_folios(ws, mapping, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_NONE:
default:
/*
@@ -115,7 +115,7 @@ static int compression_compress_pages(int type, struct list_head *ws,
* Not a big deal, just need to inform caller that we
* haven't allocated any pages yet.
*/
*out_pages = 0;
*out_folios = 0;
return -E2BIG;
}
}
@@ -158,11 +158,11 @@ static int compression_decompress(int type, struct list_head *ws,
}
}

static void btrfs_free_compressed_pages(struct compressed_bio *cb)
static void btrfs_free_compressed_folios(struct compressed_bio *cb)
{
for (unsigned int i = 0; i < cb->nr_pages; i++)
btrfs_free_compr_folio(page_folio(cb->compressed_pages[i]));
kfree(cb->compressed_pages);
for (unsigned int i = 0; i < cb->nr_folios; i++)
btrfs_free_compr_folio(cb->compressed_folios[i]);
kfree(cb->compressed_folios);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);
@@ -269,7 +269,7 @@ static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
if (!status)
status = errno_to_blk_status(btrfs_decompress_bio(cb));

btrfs_free_compressed_pages(cb);
btrfs_free_compressed_folios(cb);
btrfs_bio_end_io(cb->orig_bbio, status);
bio_put(&bbio->bio);
}
@@ -323,7 +323,7 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
end_compressed_writeback(cb);
/* Note, our inode could be gone now */

btrfs_free_compressed_pages(cb);
btrfs_free_compressed_folios(cb);
bio_put(&cb->bbio.bio);
}

@@ -342,17 +342,19 @@ static void end_bbio_comprssed_write(struct btrfs_bio *bbio)
queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
struct bio *bio = &cb->bbio.bio;
u32 offset = 0;

while (offset < cb->compressed_len) {
int ret;
u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

/* Maximum compressed extent is smaller than bio size limit. */
__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
len, 0);
ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
len, 0);
ASSERT(ret);
offset += len;
}
}
@@ -367,8 +369,8 @@ static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
* the end io hooks.
*/
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
struct page **compressed_pages,
unsigned int nr_pages,
struct folio **compressed_folios,
unsigned int nr_folios,
blk_opf_t write_flags,
bool writeback)
{
@@ -384,14 +386,14 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
end_bbio_comprssed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
cb->compressed_pages = compressed_pages;
cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
cb->writeback = writeback;
INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
cb->nr_pages = nr_pages;
cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
btrfs_add_compressed_bio_pages(cb);
btrfs_add_compressed_bio_folios(cb);

btrfs_submit_bio(&cb->bbio, 0);
}
@@ -599,14 +601,14 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)

free_extent_map(em);

cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
if (!cb->compressed_pages) {
cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
if (!cb->compressed_folios) {
ret = BLK_STS_RESOURCE;
goto out_free_bio;
}

ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios, 0);
if (ret2) {
ret = BLK_STS_RESOURCE;
goto out_free_compressed_pages;
@@ -618,7 +620,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
/* include any pages we added in add_ra-bio_pages */
cb->len = bbio->bio.bi_iter.bi_size;
cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
btrfs_add_compressed_bio_pages(cb);
btrfs_add_compressed_bio_folios(cb);

if (memstall)
psi_memstall_leave(&pflags);
@@ -627,7 +629,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
return;

out_free_compressed_pages:
kfree(cb->compressed_pages);
kfree(cb->compressed_folios);
out_free_bio:
bio_put(&cb->bbio.bio);
out:
@@ -975,25 +977,25 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
}

/* Wrapper around find_get_page(), with extra error message. */
int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
struct page **in_page_ret)
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret)
{
struct page *in_page;
struct folio *in_folio;

/*
* The compressed write path should have the page locked already, thus
* we only need to grab one reference of the page cache.
* The compressed write path should have the folio locked already, thus
* we only need to grab one reference.
*/
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
if (unlikely(!in_page)) {
in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
if (IS_ERR(in_folio)) {
struct btrfs_inode *inode = BTRFS_I(mapping->host);

btrfs_crit(inode->root->fs_info,
"failed to get page cache, root %lld ino %llu file offset %llu",
inode->root->root_key.objectid, btrfs_ino(inode), start);
return -ENOENT;
}
*in_page_ret = in_page;
*in_folio_ret = in_folio;
return 0;
}

@@ -1017,11 +1019,9 @@ int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
* @total_out is an in/out parameter, must be set to the input length and will
* be also used to return the total number of compressed bytes
*/
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
u64 start, struct page **pages,
unsigned long *out_pages,
unsigned long *total_in,
unsigned long *total_out)
int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
int type = btrfs_compress_type(type_level);
int level = btrfs_compress_level(type_level);
@@ -1030,8 +1030,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,

level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
ret = compression_compress_pages(type, workspace, mapping, start, pages,
out_pages, total_in, total_out);
ret = compression_compress_pages(type, workspace, mapping, start, folios,
out_folios, total_in, total_out);
put_workspace(type, workspace);
return ret;
}
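One behavioral detail from the btrfs_compress_filemap_get_folio() hunk above: find_get_page() reports a page-cache miss with NULL, while filemap_get_folio() reports it with ERR_PTR(-ENOENT), which is why the check moves from !in_page to IS_ERR(in_folio). A minimal sketch of the calling pattern (illustrative only; try_get_cached_folio() is a hypothetical helper, not part of the commit):

#include <linux/pagemap.h>	/* filemap_get_folio() */
#include <linux/err.h>		/* IS_ERR() */

/*
 * Illustrative sketch: filemap_get_folio() returns an ERR_PTR on a cache
 * miss rather than NULL, so the result must be checked with IS_ERR().
 */
static struct folio *try_get_cached_folio(struct address_space *mapping, u64 start)
{
	struct folio *folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);

	if (IS_ERR(folio))
		return NULL;		/* not present in the page cache */
	return folio;			/* reference held; drop with folio_put() */
}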
39 changes: 18 additions & 21 deletions fs/btrfs/compression.h
@@ -41,11 +41,11 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
#define BTRFS_ZLIB_DEFAULT_LEVEL 3

struct compressed_bio {
/* Number of compressed pages in the array */
unsigned int nr_pages;
/* Number of compressed folios in the array. */
unsigned int nr_folios;

/* the pages with the compressed data on them */
struct page **compressed_pages;
/* The folios with the compressed data on them. */
struct folio **compressed_folios;

/* starting offset in the inode for our pages */
u64 start;
@@ -85,21 +85,18 @@ static inline unsigned int btrfs_compress_level(unsigned int type_level)
int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);

int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
u64 start, struct page **pages,
unsigned long *out_pages,
unsigned long *total_in,
unsigned long *total_out);
int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);

void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
struct page **compressed_pages,
unsigned int nr_pages,
blk_opf_t write_flags,
bool writeback);
struct folio **compressed_folios,
unsigned int nr_folios, blk_opf_t write_flags,
bool writeback);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);

unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
@@ -149,11 +146,11 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len);

int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);

int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
struct page **in_page_ret);
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret);

int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
u64 start, struct page **pages, unsigned long *out_pages,
int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
@@ -163,8 +160,8 @@ struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
struct list_head *zlib_get_workspace(unsigned int level);

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
u64 start, struct page **pages, unsigned long *out_pages,
int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
@@ -173,8 +170,8 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);

int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
u64 start, struct page **pages, unsigned long *out_pages,
int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
[Diffs for the remaining 4 changed files are not shown here.]
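Similarly, in the btrfs_add_compressed_bio_folios() hunk above, the switch from __bio_add_page() (which returns void) to bio_add_folio() (which returns bool) is why the new code captures the result and asserts it. A small sketch of that pattern (illustrative only; add_one_compressed_folio() is a hypothetical helper, not part of the commit):

#include <linux/bio.h>		/* bio_add_folio() */
#include <linux/errno.h>	/* -EIO */

/*
 * Illustrative sketch: bio_add_folio() fails (returns false) if the data
 * does not fit into the bio, so even an "always fits" caller has to check.
 */
static int add_one_compressed_folio(struct bio *bio, struct folio *folio, u32 len)
{
	if (!bio_add_folio(bio, folio, len, 0))
		return -EIO;	/* bio unexpectedly full */
	return 0;
}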
