From b4f76651ef782fd726ece1e3b9e45e10c9525399 Mon Sep 17 00:00:00 2001
From: Chao Yu
Date: Fri, 6 Aug 2021 08:05:58 +0800
Subject: [PATCH] f2fs: avoid unneeded memory allocation in __add_ino_entry()

__add_ino_entry() will allocate a slab cache entry even if we have
already cached the ino entry in the radix tree, e.g. in the case of
multiple devices.

Let's check the radix tree first, under the protection of the RCU read
lock, to see whether we need to do the slab allocation; this mitigates
memory pressure on the "f2fs_ino_entry" slab cache.

Signed-off-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/checkpoint.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f6f3685ec7b8..fba8100b0333 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -465,16 +465,28 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
 					unsigned int devidx, int type)
 {
 	struct inode_management *im = &sbi->im[type];
-	struct ino_entry *e, *tmp;
+	struct ino_entry *e = NULL, *new = NULL;
 
-	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
+	if (type == FLUSH_INO) {
+		rcu_read_lock();
+		e = radix_tree_lookup(&im->ino_root, ino);
+		rcu_read_unlock();
+	}
+
+retry:
+	if (!e)
+		new = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
 
 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 
 	spin_lock(&im->ino_lock);
 	e = radix_tree_lookup(&im->ino_root, ino);
 	if (!e) {
-		e = tmp;
+		if (!new) {
+			spin_unlock(&im->ino_lock);
+			goto retry;
+		}
+		e = new;
 		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
 			f2fs_bug_on(sbi, 1);
 
@@ -492,8 +504,8 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
 	spin_unlock(&im->ino_lock);
 	radix_tree_preload_end();
 
-	if (e != tmp)
-		kmem_cache_free(ino_entry_slab, tmp);
+	if (new && e != new)
+		kmem_cache_free(ino_entry_slab, new);
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
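
Note for readers outside the kernel tree: the change implements a common
"look up first, allocate with no lock held, re-check under the lock"
pattern. The sketch below reproduces it in portable user-space C under
stated assumptions: a mutex-protected singly linked list stands in for
the RCU-safe radix tree, malloc()/free() stand in for the slab cache,
and all names (ino_node, ino_lookup, add_ino_entry) are illustrative,
not f2fs or kernel APIs. Because a plain list cannot be traversed
safely without a lock, the optimistic pre-check here also takes the
mutex, whereas the patch can use a lockless rcu_read_lock() lookup.

/* lookup_then_alloc.c -- user-space sketch of the pattern in the patch.
 * Build: cc -pthread lookup_then_alloc.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ino_entry; a linked list replaces the radix
 * tree and a mutex replaces im->ino_lock. */
struct ino_node {
	unsigned long ino;
	struct ino_node *next;
};

static struct ino_node *ino_list;
static pthread_mutex_t ino_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold ino_lock. */
static struct ino_node *ino_lookup(unsigned long ino)
{
	struct ino_node *e;

	for (e = ino_list; e; e = e->next)
		if (e->ino == ino)
			return e;
	return NULL;
}

static void add_ino_entry(unsigned long ino)
{
	struct ino_node *e, *new = NULL;

	/* Cheap pre-check: skip the allocation entirely when the entry
	 * is already cached -- the common case the patch optimizes. */
	pthread_mutex_lock(&ino_lock);
	e = ino_lookup(ino);
	pthread_mutex_unlock(&ino_lock);

retry:
	if (!e && !new) {
		new = malloc(sizeof(*new));	/* allocate with no lock held */
		if (!new)
			return;	/* real code would propagate -ENOMEM */
	}

	pthread_mutex_lock(&ino_lock);
	/* Re-check under the lock: another thread may have raced us. */
	e = ino_lookup(ino);
	if (!e) {
		if (!new) {
			/* The pre-check saw an entry, but it has since
			 * been removed; retry with a fresh allocation,
			 * mirroring the patch's goto retry. */
			pthread_mutex_unlock(&ino_lock);
			goto retry;
		}
		new->ino = ino;
		new->next = ino_list;
		ino_list = new;
		e = new;
	}
	pthread_mutex_unlock(&ino_lock);

	/* Lost the insertion race (or never allocated): free the spare
	 * node instead of leaking it, like kmem_cache_free() above. */
	if (new && e != new)
		free(new);
}

int main(void)
{
	add_ino_entry(7);	/* allocates and inserts */
	add_ino_entry(7);	/* pre-check hits, no allocation */
	printf("entry for ino 7 %s\n", ino_list ? "cached" : "missing");
	return 0;
}

The key invariant matches the patch: the allocation happens with no
lock held, the authoritative lookup is repeated under the lock, and a
node that loses the insertion race is freed rather than leaked. The
retry label covers the narrow window where the pre-check saw an entry
that was removed before the lock was taken.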