forked from Minki/linux
ext4: Convert list_for_each_rcu() to list_for_each_entry_rcu()
The list_for_each_entry_rcu() primitive should be used instead of list_for_each_rcu(), as the former is easier to use and provides better type safety. Discussion: http://groups.google.com/group/linux.kernel/browse_thread/thread/45749c83451cebeb/0633a65759ce7713?lnk=raot Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Roel Kluin <12o3l@tiscali.nl> Signed-off-by: Mingming Cao <cmm@us.ibm.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
This commit is contained in:
parent
4ddfef7b41
commit
9a0762c5af
@@ -3149,10 +3149,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
||||
{
|
||||
int bsbits, max;
|
||||
ext4_lblk_t end;
|
||||
struct list_head *cur;
|
||||
loff_t size, orig_size, start_off;
|
||||
ext4_lblk_t start, orig_start;
|
||||
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
||||
struct ext4_prealloc_space *pa;
|
||||
|
||||
/* do normalize only data requests, metadata requests
|
||||
do not need preallocation */
|
||||
@@ -3238,12 +3238,9 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
||||
|
||||
/* check we don't cross already preallocated blocks */
|
||||
rcu_read_lock();
|
||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
||||
struct ext4_prealloc_space *pa;
|
||||
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||
unsigned long pa_end;
|
||||
|
||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
||||
|
||||
if (pa->pa_deleted)
|
||||
continue;
|
||||
spin_lock(&pa->pa_lock);
|
||||
@@ -3285,10 +3282,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
||||
|
||||
/* XXX: extra loop to check we really don't overlap preallocations */
|
||||
rcu_read_lock();
|
||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
||||
struct ext4_prealloc_space *pa;
|
||||
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||
unsigned long pa_end;
|
||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
||||
spin_lock(&pa->pa_lock);
|
||||
if (pa->pa_deleted == 0) {
|
||||
pa_end = pa->pa_lstart + pa->pa_len;
|
||||
@@ -3416,7 +3411,6 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
||||
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
||||
struct ext4_locality_group *lg;
|
||||
struct ext4_prealloc_space *pa;
|
||||
struct list_head *cur;
|
||||
|
||||
/* only data can be preallocated */
|
||||
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
|
||||
@@ -3424,8 +3418,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
||||
|
||||
/* first, try per-file preallocation */
|
||||
rcu_read_lock();
|
||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
||||
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||
|
||||
/* all fields in this condition don't change,
|
||||
* so we can skip locking for them */
|
||||
@@ -3457,8 +3450,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_rcu(cur, &lg->lg_prealloc_list) {
|
||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
||||
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
|
||||
spin_lock(&pa->pa_lock);
|
||||
if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
|
||||
atomic_inc(&pa->pa_count);
|
||||
|
Loading…
Reference in New Issue
Block a user