staging: erofs: tidy up utils.c

Keep in line with erofs-outofstaging patchset:
 - Update comments in erofs_try_to_release_workgroup;
 - clean up code style.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Link: https://lore.kernel.org/r/20190731155752.210602-21-gaoxiang25@huawei.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -114,8 +114,7 @@ int erofs_register_workgroup(struct super_block *sb,
 	 */
 	__erofs_workgroup_get(grp);
 
-	err = radix_tree_insert(&sbi->workstn_tree,
-				grp->index, grp);
+	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
 	if (unlikely(err))
 		/*
 		 * it's safe to decrease since the workgroup isn't visible
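
Context for readers without the full file: the joined call lives in the
workgroup publish path, which grabs a reference before the radix tree
insert; if the insert fails, that reference can simply be dropped again,
since the workgroup was never visible to other CPUs. A paraphrased
sketch (not the verbatim function body):

	__erofs_workgroup_get(grp);	/* hold a ref for the workstation */

	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
	if (unlikely(err))
		/* still invisible, so it cannot be freezed concurrently */
		atomic_dec(&grp->refcount);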
@@ -156,18 +155,18 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 					   bool cleanup)
 {
 	/*
-	 * for managed cache enabled, the refcount of workgroups
-	 * themselves could be < 0 (freezed). So there is no guarantee
-	 * that all refcount > 0 if managed cache is enabled.
+	 * If managed cache is on, refcount of workgroups
+	 * themselves could be < 0 (freezed). In other words,
+	 * there is no guarantee that all refcounts > 0.
 	 */
 	if (!erofs_workgroup_try_to_freeze(grp, 1))
 		return false;
 
 	/*
-	 * note that all cached pages should be unlinked
-	 * before delete it from the radix tree.
-	 * Otherwise some cached pages of an orphan old workgroup
-	 * could be still linked after the new one is available.
+	 * Note that all cached pages should be unattached
+	 * before deleted from the radix tree. Otherwise some
+	 * cached pages could be still attached to the orphan
+	 * old workgroup when the new one is available in the tree.
 	 */
 	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
 		erofs_workgroup_unfreeze(grp, 1);
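
The "freezed" state mentioned in the first comment is implemented by
atomically swapping the refcount to a negative sentinel, which is why a
refcount can legitimately read < 0 while managed cache is on. A
simplified sketch of the idea (not the exact helper from internal.h;
EROFS_LOCKED_MAGIC is the driver's negative sentinel):

	static inline bool sketch_try_to_freeze(struct erofs_workgroup *grp,
						int expected)
	{
		/* succeeds only if refcount still reads exactly @expected */
		if (atomic_cmpxchg(&grp->refcount, expected,
				   EROFS_LOCKED_MAGIC) != expected)
			return false;
		preempt_disable();	/* keep the freezed window short */
		return true;
	}

Unfreezing is the mirror image: store a non-negative value back into
the refcount and re-enable preemption.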
@@ -175,7 +174,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	}
 
 	/*
-	 * it is impossible to fail after the workgroup is freezed,
+	 * It's impossible to fail after the workgroup is freezed,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
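
DBG_BUGON compiles away on non-debug builds, so the check the comment
refers to is free in production kernels; its definition is roughly:

	#ifdef CONFIG_EROFS_FS_DEBUG
	#define DBG_BUGON(condition)	BUG_ON(condition)
	#else
	#define DBG_BUGON(condition)	((void)(condition))
	#endif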
@@ -183,8 +182,8 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 					     grp->index)) != grp);
 
 	/*
-	 * if managed cache is enable, the last refcount
-	 * should indicate the related workstation.
+	 * If managed cache is on, last refcount should indicate
+	 * the related workstation.
 	 */
 	erofs_workgroup_unfreeze_final(grp);
 	return true;
@@ -273,9 +272,9 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
 	unsigned long freed = 0;
 
 	spin_lock(&erofs_sb_list_lock);
-	do
+	do {
 		run_no = ++shrinker_run_no;
-	while (run_no == 0);
+	} while (run_no == 0);
 
 	/* Iterate over all mounted superblocks and try to shrink them */
 	p = erofs_sb_list.next;
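
Beyond style, the braces make the loop's intent clearer: the run number
is a wrapping counter, and 0 is skipped so a superblock whose per-sb
marker still holds its initial value of 0 can never appear "already
visited" (the usual reason 0 is reserved in schemes like this). A
standalone illustration with hypothetical names:

	static unsigned int shrinker_run_no;	/* wraps around eventually */

	/* caller serializes access (erofs holds erofs_sb_list_lock here) */
	static unsigned int next_shrinker_run_no(void)
	{
		unsigned int run_no;

		do {
			run_no = ++shrinker_run_no;
		} while (run_no == 0);	/* re-increment on wraparound */

		return run_no;
	}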