invalidate_mapping_pages(): add cond_resched

invalidate_mapping_pages() can sometimes take a long time (millions of pages
to free).  Long enough for the softlockup detector to trigger.

We used to have a cond_resched() in there but I took it out because the
drop_caches code calls invalidate_mapping_pages() under inode_lock.

The patch adds a nasty flag and puts the cond_resched() back.

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Andrew Morton 2007-07-15 23:38:14 -07:00 committed by Linus Torvalds
parent 45426812d6
commit fc9a07e7bf
3 changed files with 27 additions and 16 deletions
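
The reasoning above boils down to one constraint: cond_resched() may sleep, so it must not run while a spinlock such as inode_lock is held, which is why the caller has to say what context it is in. A minimal sketch of that pattern follows; prune_example() and its batch size are made up for illustration and are not part of this patch or of any kernel API.

#include <linux/kernel.h>	/* min(), likely() */
#include <linux/sched.h>	/* cond_resched() */
#include <linux/types.h>	/* bool */

/*
 * Illustrative only: a long-running prune loop reachable both from
 * sleepable context and from under a spinlock.  The flag says whether
 * rescheduling is legal, so the softlockup detector stays quiet for
 * the sleepable callers without breaking the atomic one.
 */
static unsigned long prune_example(unsigned long nr_items, bool be_atomic)
{
	unsigned long freed = 0;

	while (nr_items) {
		unsigned long batch = min(nr_items, 16UL);	/* hypothetical batch */

		nr_items -= batch;
		freed += batch;

		if (likely(!be_atomic))
			cond_resched();	/* only legal when no spinlock is held */
	}
	return freed;
}

The patch below does exactly this inside __invalidate_mapping_pages(), with drop_pagecache_sb() (which holds inode_lock) as the one be_atomic == true caller.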

fs/drop_caches.c

@@ -20,7 +20,7 @@ static void drop_pagecache_sb(struct super_block *sb)
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_FREEING|I_WILL_FREE))
 			continue;
-		invalidate_mapping_pages(inode->i_mapping, 0, -1);
+		__invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
 	}
 	spin_unlock(&inode_lock);
 }

include/linux/fs.h

@@ -1610,6 +1610,9 @@ extern int __invalidate_device(struct block_device *);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 extern int invalidate_inodes(struct super_block *);
+unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+					pgoff_t start, pgoff_t end,
+					bool be_atomic);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 					pgoff_t start, pgoff_t end);

mm/truncate.c

@@ -253,21 +253,8 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 }
 EXPORT_SYMBOL(truncate_inode_pages);
 
-/**
- * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
- * @start: the offset 'from' which to invalidate
- * @end: the offset 'to' which to invalidate (inclusive)
- *
- * This function only removes the unlocked pages, if you want to
- * remove all the pages of one inode, you must call truncate_inode_pages.
- *
- * invalidate_mapping_pages() will not block on IO activity. It will not
- * invalidate pages which are dirty, locked, under writeback or mapped into
- * pagetables.
- */
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
-				pgoff_t start, pgoff_t end)
+unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+				pgoff_t start, pgoff_t end, bool be_atomic)
 {
 	struct pagevec pvec;
 	pgoff_t next = start;
@@ -308,9 +295,30 @@ unlock:
 				break;
 		}
 		pagevec_release(&pvec);
+		if (likely(!be_atomic))
+			cond_resched();
 	}
 	return ret;
 }
+
+/**
+ * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
+ * @mapping: the address_space which holds the pages to invalidate
+ * @start: the offset 'from' which to invalidate
+ * @end: the offset 'to' which to invalidate (inclusive)
+ *
+ * This function only removes the unlocked pages, if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ *
+ * invalidate_mapping_pages() will not block on IO activity. It will not
+ * invalidate pages which are dirty, locked, under writeback or mapped into
+ * pagetables.
+ */
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+				pgoff_t start, pgoff_t end)
+{
+	return __invalidate_mapping_pages(mapping, start, end, false);
+}
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*