forked from Minki/linux
zram: reorganize code layout
This patch looks big, but basically it just moves code blocks.
No functional changes.

Our current code layout looks like a sandwich. For example,

a) between read/write handlers, we have the update_used_max() helper function:

static int zram_decompress_page
static int zram_bvec_read
static inline void update_used_max
static int zram_bvec_write
static int zram_bvec_rw

b) RW request handlers __zram_make_request/zram_bio_discard are divided by
the sysfs attr reset_store() function and the corresponding
zram_reset_device() handler:

static void zram_bio_discard
static void zram_reset_device
static ssize_t disksize_store
static ssize_t reset_store
static void __zram_make_request

c) we first have a bunch of sysfs read/store functions, then a number of
one-liners, then helper functions, RW functions, sysfs functions, helper
functions again, and so on.

Reorganize the layout to be more logically grouped (a brief description;
`cat zram_drv.c | grep static` gives a bigger picture):

-- one-liners: zram_test_flag/etc.

-- helpers: is_partial_io/update_position/etc.

-- sysfs attr show/store functions + ZRAM_ATTR_RO() generated stats
show() functions
exception: the reset and disksize store functions are required to be after
the meta() functions, because we do device create/destroy actions in these
sysfs handlers.

-- "mm" functions: meta get/put, meta alloc/free, page free
static inline bool zram_meta_get
static inline void zram_meta_put
static void zram_meta_free
static struct zram_meta *zram_meta_alloc
static void zram_free_page

-- a block of I/O functions
static int zram_decompress_page
static int zram_bvec_read
static int zram_bvec_write
static void zram_bio_discard
static int zram_bvec_rw
static void __zram_make_request
static void zram_make_request
static void zram_slot_free_notify
static int zram_rw_page

-- device control: add/remove/init/reset functions (+ the zram-control
class will sit here)
static int zram_reset_device
static ssize_t reset_store
static ssize_t disksize_store
static int zram_add
static void zram_remove
static int __init zram_init
static void __exit zram_exit

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
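[Editor's note] The "mm" group above pairs zram_meta_get()/zram_meta_put(), the refcount guard that every I/O path takes so a request cannot race with device reset. A minimal userspace C11 sketch of that idiom follows; the struct and function names here are illustrative stand-ins, not the kernel code:

/* Sketch (userspace C11, not kernel code) of the refcount guard that
 * zram_meta_get()/zram_meta_put() implement: take a reference only
 * while the count is non-zero, i.e. while no reset is tearing the
 * metadata down. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct meta_guard {			/* stand-in for struct zram */
	atomic_int refcount;		/* drops to 0 once teardown starts */
};

static bool meta_get(struct meta_guard *g)	/* ~atomic_inc_not_zero() */
{
	int old = atomic_load(&g->refcount);

	while (old != 0) {
		/* On failure, old is reloaded and the != 0 test reruns. */
		if (atomic_compare_exchange_weak(&g->refcount, &old, old + 1))
			return true;
	}
	return false;			/* device going away: fail the I/O */
}

static void meta_put(struct meta_guard *g)
{
	atomic_fetch_sub(&g->refcount, 1);
}

int main(void)
{
	struct meta_guard g = { .refcount = 1 };

	if (meta_get(&g)) {	/* I/O path pattern: get, do work, put */
		printf("refcount while I/O in flight: %d\n",
		       atomic_load(&g.refcount));
		meta_put(&g);
	}
	return 0;
}

The inc-not-zero step is what makes the guard safe: once teardown has dropped the count to zero, no late request can take a new reference and touch freed metadata. This is also why the commit message keeps reset_store()/disksize_store() after the meta functions.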
This commit is contained in:
parent 85508ec6cb
commit 522698d7ca
@@ -70,33 +70,117 @@ static inline struct zram *dev_to_zram(struct device *dev)
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
/* flag operations needs meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
	return meta->table[index].value & BIT(flag);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	struct zram *zram = dev_to_zram(dev);
	meta->table[index].value |= BIT(flag);
}

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
			u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
@@ -112,6 +196,14 @@ static ssize_t initstate_show(struct device *dev,
	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
@@ -139,19 +231,6 @@ static ssize_t mem_used_total_show(struct device *dev,
	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
@@ -221,6 +300,19 @@ static ssize_t mem_used_max_store(struct device *dev,
	return len;
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
@@ -278,65 +370,95 @@ static ssize_t comp_algorithm_store(struct device *dev,
	return len;
}

/* flag operations needs meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return meta->table[index].value & BIT(flag);
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	meta->table[index].value |= BIT(flag);
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	meta->table[index].value &= ~BIT(flag);
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static void zram_set_obj_size(struct zram_meta *meta,
			u32 index, size_t size)
static inline void zram_meta_put(struct zram *zram)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
@@ -390,56 +512,6 @@ out_error:
	return NULL;
}

static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
@@ -557,21 +629,6 @@ out_cleanup:
	return ret;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
@@ -699,35 +756,6 @@ out:
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
@@ -767,6 +795,171 @@ static void zram_bio_discard(struct zram *zram, u32 index,
	}
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If I/O fails, just return error(ie, non-zero) without
	 * calling page_endio.
	 * It causes resubmit the I/O with bio request by upper functions
	 * of rw_page(e.g., swap_readpage, __swap_writepage) and
	 * bio->bi_end_io does things to handle the error
	 * (e.g., SetPageError, set_page_dirty and extra works).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
@@ -915,142 +1108,6 @@ out:
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If I/O fails, just return error(ie, non-zero) without
	 * calling page_endio.
	 * It causes resubmit the I/O with bio request by upper functions
	 * of rw_page(e.g., swap_readpage, __swap_writepage) and
	 * bio->bi_end_io does things to handle the error
	 * (e.g., SetPageError, set_page_dirty and extra works).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
@@ -1068,64 +1125,6 @@ static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
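[Editor's note] The update_used_max() helper moved by this patch maintains a high-water mark without locks, via an atomic_long_cmpxchg() retry loop. A hedged userspace C11 sketch of the same idiom follows; it is illustrative only, the kernel version appears verbatim in the hunks above:

/* Sketch (userspace C11, not kernel code) of the lock-free
 * high-water-mark update performed by update_used_max():
 * retry until the stored maximum is >= pages. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long max_used_pages;	/* zram->stats.max_used_pages stand-in */

static void update_used_max_sketch(long pages)
{
	long cur_max = atomic_load(&max_used_pages);

	while (pages > cur_max) {
		/* On failure, cur_max is reloaded with the current value,
		 * so the loop re-tests whether we still exceed it. */
		if (atomic_compare_exchange_weak(&max_used_pages,
						 &cur_max, pages))
			break;
	}
}

int main(void)
{
	update_used_max_sketch(10);
	update_used_max_sketch(7);	/* no-op: 7 < 10 */
	printf("max_used_pages = %ld\n", atomic_load(&max_used_pages));
	return 0;
}

The compare-and-swap loop is what lets many writers race on the counter: a stale maximum simply causes a retry, and a smaller candidate drops out of the loop without writing anything.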