zram: make deduplication feature optional

The benefit of deduplication depends on the workload, so it is not
desirable to enable it unconditionally. Make it optional in Kconfig
and via a per-device parameter; the default is 'off'. The option is
most beneficial for users who use zram as a block device and store
build output on it.
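
For illustration, a minimal userspace sketch of the intended usage
(the device name and sizes below are examples, not part of this
patch). Note that use_dedup must be written before disksize, since
use_dedup_store() rejects changes on an initialized device, and the
attribute is writable only with CONFIG_ZRAM_DEDUP=y:

/*
 * Illustrative only: enable dedup on zram0, then size the device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        if (write_attr("/sys/block/zram0/use_dedup", "1"))
                perror("use_dedup");
        if (write_attr("/sys/block/zram0/disksize", "1073741824"))
                perror("disksize");
        return 0;
}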

Change-Id: If282bb8aa15c5749859a87cf36db7eb9edb3b1ed
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: https://lore.kernel.org/patchwork/patch/787164/
Patch-mainline: linux-kernel@ Thu, 11 May 2017 22:30:52
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
[swatsrid@codeaurora.org: Fix trivial merge conflicts]
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
Author: Joonsoo Kim
Date: 2017-05-12 11:30:03 +09:00
Commit: be0c36ce98 (parent: cc8cf81b25)

8 changed files with 145 additions and 13 deletions

--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram

@@ -137,3 +137,13 @@ Description:
 		The writeback_limit file is read-write and specifies the maximum
 		amount of writeback ZRAM can do. The limit could be changed
 		in run time.
+
+What:		/sys/block/zram<id>/use_dedup
+Date:		March 2017
+Contact:	Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Description:
+		The use_dedup file is read-write and specifies whether the
+		deduplication feature is used. If enabled, duplicated data
+		is managed by reference count and will not be stored in
+		memory twice. The benefit of this feature largely depends
+		on the workload, so use it with care.
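
As an aside, a stripped-down userspace sketch of the reference-counted
sharing described above (the demo_* names are hypothetical; the driver
itself keeps per-bucket rb-trees of zram_entry objects keyed by
checksum):

/*
 * Simplified illustration of "duplicated data is managed by reference
 * count": identical buffers share one copy.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define NR_BUCKETS 1024

struct demo_entry {
        struct demo_entry *next;
        uint32_t checksum;
        unsigned int refcount;
        size_t len;
        void *data;
};

static struct demo_entry *buckets[NR_BUCKETS];

/* Return a shared entry for identical data, or store a new copy. */
static struct demo_entry *demo_store(const void *buf, size_t len,
                                     uint32_t checksum)
{
        struct demo_entry **head = &buckets[checksum % NR_BUCKETS];
        struct demo_entry *e;

        for (e = *head; e; e = e->next) {
                if (e->checksum == checksum && e->len == len &&
                    !memcmp(e->data, buf, len)) {
                        e->refcount++;          /* duplicate: share it */
                        return e;
                }
        }

        e = calloc(1, sizeof(*e));
        if (!e)
                return NULL;
        e->data = malloc(len);
        if (!e->data) {
                free(e);
                return NULL;
        }
        memcpy(e->data, buf, len);
        e->checksum = checksum;
        e->len = len;
        e->refcount = 1;
        e->next = *head;
        *head = e;
        return e;
}

/* Drop one reference; free the copy only when nobody uses it. */
static void demo_put(struct demo_entry *e)
{
        if (e && --e->refcount == 0) {
                /* A real implementation would also unlink it here. */
                free(e->data);
                free(e);
        }
}

int main(void)
{
        static const char page[16] = "hello";
        struct demo_entry *a = demo_store(page, sizeof(page), 42);
        struct demo_entry *b = demo_store(page, sizeof(page), 42);
        int shared = (a == b);  /* second store reused the first entry */

        demo_put(b);
        demo_put(a);
        return shared ? 0 : 1;
}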

--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt

@@ -173,7 +173,7 @@ compact           WO    trigger memory compaction
 debug_stat        RO    this file is used for zram debugging purposes
 backing_dev       RW    set up backend storage for zram to write out
 idle              WO    mark allocated slot as idle
+use_dedup         RW    show and set deduplication feature
 
 User space is advised to use the following files to read the device statistics.

--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig

@@ -15,6 +15,20 @@ config ZRAM
 	  See Documentation/blockdev/zram.txt for more information.
 
+config ZRAM_DEDUP
+	bool "Deduplication support for ZRAM data"
+	depends on ZRAM
+	default n
+	help
+	  Deduplicate ZRAM data to reduce the amount of memory consumed.
+	  The advantage largely depends on the workload. In some cases this
+	  option cuts memory usage in half; however, if there is no
+	  duplicated data, memory consumption increases due to the
+	  additional metadata, and there is also a computation-time
+	  trade-off. Please check the benefit before enabling this option.
+	  Experiments show a positive effect when zram is used as a block
+	  device to store build output.
+
 config ZRAM_WRITEBACK
 	bool "Write back incompressible or idle page to backing device"
 	depends on ZRAM
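
A rough back-of-the-envelope for the trade-off described in the help
text, using assumed numbers (per-entry metadata size, average
compressed page size and duplicate fraction are illustrative, not
measured from the driver):

/*
 * Rough estimate of the dedup trade-off. All numbers are assumptions
 * for illustration, and the model ignores hash-table memory and
 * zsmalloc allocation granularity.
 */
#include <stdio.h>

int main(void)
{
        const double entry_bytes = 48.0;        /* assumed per-entry metadata */
        const double avg_compressed = 1500.0;   /* assumed avg compressed 4K page */
        const double dup_fraction = 0.30;       /* assumed share of duplicate pages */
        const double pages = 256.0 * 1024.0;    /* 1 GiB of 4K pages written */

        double metadata = pages * entry_bytes;
        double saved = pages * dup_fraction * avg_compressed;

        printf("extra metadata: %.1f MiB\n", metadata / (1 << 20));
        printf("payload saved:  %.1f MiB\n", saved / (1 << 20));
        printf("net change:     %+.1f MiB\n", (metadata - saved) / (1 << 20));
        printf("break-even duplicate fraction: %.1f%%\n",
               100.0 * entry_bytes / avg_compressed);
        return 0;
}

With these particular assumptions, roughly 3% of stored pages need to
be duplicates before the metadata pays for itself.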

--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile

@@ -1,3 +1,4 @@
-zram-y	:=	zcomp.o zram_drv.o zram_dedup.o
+zram-y	:=	zcomp.o zram_drv.o
+zram-$(CONFIG_ZRAM_DEDUP)	+=	zram_dedup.o
 
 obj-$(CONFIG_ZRAM)	+=	zram.o

--- a/drivers/block/zram/zram_dedup.c
+++ b/drivers/block/zram/zram_dedup.c

@@ -41,6 +41,9 @@ void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
 	struct rb_node **rb_node, *parent = NULL;
 	struct zram_entry *entry;
 
+	if (!zram_dedup_enabled(zram))
+		return;
+
 	new->checksum = checksum;
 	hash = &zram->hash[checksum % zram->hash_size];
 	rb_root = &hash->rb_root;
@@ -148,6 +151,9 @@ struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
 	void *mem;
 	struct zram_entry *entry;
 
+	if (!zram_dedup_enabled(zram))
+		return NULL;
+
 	mem = kmap_atomic(page);
 	*checksum = zram_dedup_checksum(mem);
@@ -160,6 +166,9 @@ struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
 void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
 				unsigned long handle, unsigned int len)
 {
+	if (!zram_dedup_enabled(zram))
+		return;
+
 	entry->handle = handle;
 	entry->refcount = 1;
 	entry->len = len;
@@ -167,6 +176,9 @@ void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
 bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry)
 {
+	if (!zram_dedup_enabled(zram))
+		return true;
+
 	if (zram_dedup_put(zram, entry))
 		return false;
@@ -178,6 +190,9 @@ int zram_dedup_init(struct zram *zram, size_t num_pages)
 	int i;
 	struct zram_hash *hash;
 
+	if (!zram_dedup_enabled(zram))
+		return 0;
+
 	zram->hash_size = num_pages >> ZRAM_HASH_SHIFT;
 	zram->hash_size = min_t(size_t, ZRAM_HASH_SIZE_MAX, zram->hash_size);
 	zram->hash_size = max_t(size_t, ZRAM_HASH_SIZE_MIN, zram->hash_size);

--- a/drivers/block/zram/zram_dedup.h
+++ b/drivers/block/zram/zram_dedup.h

@@ -4,6 +4,8 @@
 struct zram;
 struct zram_entry;
 
+#ifdef CONFIG_ZRAM_DEDUP
+
 u64 zram_dedup_dup_size(struct zram *zram);
 u64 zram_dedup_meta_size(struct zram *zram);
@@ -18,5 +20,26 @@ bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry);
 int zram_dedup_init(struct zram *zram, size_t num_pages);
 void zram_dedup_fini(struct zram *zram);
 
+#else
+
+static inline u64 zram_dedup_dup_size(struct zram *zram) { return 0; }
+static inline u64 zram_dedup_meta_size(struct zram *zram) { return 0; }
+
+static inline void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
+				u32 checksum) { }
+static inline struct zram_entry *zram_dedup_find(struct zram *zram,
+				struct page *page, u32 *checksum) { return NULL; }
+
+static inline void zram_dedup_init_entry(struct zram *zram,
+				struct zram_entry *entry, unsigned long handle,
+				unsigned int len) { }
+static inline bool zram_dedup_put_entry(struct zram *zram,
+				struct zram_entry *entry) { return true; }
+
+static inline int zram_dedup_init(struct zram *zram,
+				size_t num_pages) { return 0; }
+static inline void zram_dedup_fini(struct zram *zram) { }
+
+#endif
+
 #endif /* _ZRAM_DEDUP_H_ */

--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c

@@ -1030,6 +1030,41 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	return len;
 }
 
+static ssize_t use_dedup_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	bool val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	val = zram->use_dedup;
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)val);
+}
+
+#ifdef CONFIG_ZRAM_DEDUP
+static ssize_t use_dedup_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	int val;
+	struct zram *zram = dev_to_zram(dev);
+
+	if (kstrtoint(buf, 10, &val) || (val != 0 && val != 1))
+		return -EINVAL;
+
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		up_write(&zram->init_lock);
+		pr_info("Can't change dedup usage for initialized device\n");
+		return -EBUSY;
+	}
+	zram->use_dedup = val;
+	up_write(&zram->init_lock);
+
+	return len;
+}
+#endif
+
 static ssize_t compact_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -1147,20 +1182,32 @@ static DEVICE_ATTR_RO(bd_stat);
 #endif
 static DEVICE_ATTR_RO(debug_stat);
 
+static unsigned long zram_entry_handle(struct zram *zram,
+		struct zram_entry *entry)
+{
+	if (zram_dedup_enabled(zram))
+		return entry->handle;
+	else
+		return (unsigned long)entry;
+}
+
 static struct zram_entry *zram_entry_alloc(struct zram *zram,
 					unsigned int len, gfp_t flags)
 {
 	struct zram_entry *entry;
 	unsigned long handle;
 
-	entry = kzalloc(sizeof(*entry),
-			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
-	if (!entry)
+	handle = zs_malloc(zram->mem_pool, len, flags);
+	if (!handle)
 		return NULL;
 
-	handle = zs_malloc(zram->mem_pool, len, flags);
-	if (!handle) {
-		kfree(entry);
+	if (!zram_dedup_enabled(zram))
+		return (struct zram_entry *)handle;
+
+	entry = kzalloc(sizeof(*entry),
+			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
+	if (!entry) {
+		zs_free(zram->mem_pool, handle);
 		return NULL;
 	}
 
@@ -1175,7 +1222,11 @@ void zram_entry_free(struct zram *zram, struct zram_entry *entry)
 	if (!zram_dedup_put_entry(zram, entry))
 		return;
 
-	zs_free(zram->mem_pool, entry->handle);
+	zs_free(zram->mem_pool, zram_entry_handle(zram, entry));
+
+	if (!zram_dedup_enabled(zram))
+		return;
+
 	kfree(entry);
 
 	atomic64_sub(sizeof(*entry), &zram->stats.meta_data_size);
@@ -1311,7 +1362,8 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 	size = zram_get_obj_size(zram, index);
 
-	src = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO);
+	src = zs_map_object(zram->mem_pool,
+			zram_entry_handle(zram, entry), ZS_MM_RO);
 	if (size == PAGE_SIZE) {
 		dst = kmap_atomic(page);
 		memcpy(dst, src, PAGE_SIZE);
@@ -1325,7 +1377,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		kunmap_atomic(dst);
 		zcomp_stream_put(zram->comp);
 	}
-	zs_unmap_object(zram->mem_pool, entry->handle);
+	zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry));
 	zram_slot_unlock(zram, index);
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -1454,7 +1506,8 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 		return -ENOMEM;
 	}
 
-	dst = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_WO);
+	dst = zs_map_object(zram->mem_pool,
+			zram_entry_handle(zram, entry), ZS_MM_WO);
 
 	src = zstrm->buffer;
 	if (comp_len == PAGE_SIZE)
@@ -1464,7 +1517,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 	kunmap_atomic(src);
 
 	zcomp_stream_put(zram->comp);
-	zs_unmap_object(zram->mem_pool, entry->handle);
+	zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry));
 	atomic64_add(comp_len, &zram->stats.compr_data_size);
 	zram_dedup_insert(zram, entry, checksum);
 out:
@@ -1908,6 +1961,11 @@ static DEVICE_ATTR_WO(writeback);
 static DEVICE_ATTR_RW(writeback_limit);
 static DEVICE_ATTR_RW(writeback_limit_enable);
 #endif
+#ifdef CONFIG_ZRAM_DEDUP
+static DEVICE_ATTR_RW(use_dedup);
+#else
+static DEVICE_ATTR_RO(use_dedup);
+#endif
 
 static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_disksize.attr,
@@ -1925,6 +1983,7 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_writeback_limit.attr,
 	&dev_attr_writeback_limit_enable.attr,
 #endif
+	&dev_attr_use_dedup.attr,
 	&dev_attr_io_stat.attr,
 	&dev_attr_mm_stat.attr,
 #ifdef CONFIG_ZRAM_WRITEBACK

--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h

@@ -134,6 +134,7 @@ struct zram {
 	 * zram is claimed so open request will be failed
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
+	bool use_dedup;
 	struct file *backing_dev;
 #ifdef CONFIG_ZRAM_WRITEBACK
 	spinlock_t wb_limit_lock;
@@ -149,5 +150,14 @@ struct zram {
 #endif
 };
 
+static inline bool zram_dedup_enabled(struct zram *zram)
+{
+#ifdef CONFIG_ZRAM_DEDUP
+	return zram->use_dedup;
+#else
+	return false;
+#endif
+}
+
 void zram_entry_free(struct zram *zram, struct zram_entry *entry);
 #endif