FS-Cache patches 2013-07-02
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.13 (GNU/Linux)

iQIVAwUAUdLdUxOxKuMESys7AQK1kQ//W7fgFXCG+5XVk4ECHGN5tqRn4tU69DY0
9nYU2/y1wbqV5cTO36XTcFPQK1qbW2ZdyvEZ2CF8OfwtQpLmcALGtpBIgJwYs+4H
DMkgO06zdk4caxc0C4JBIGs+MDeLNk2SQObqblGl1BAQKQ5cqsCLsIZ/rxln999m
ufuobfns1YvuHkzMtswUDmm3zWMpwqqPAbbl+fTwPU683a/AleckG2ACyFvKZAxA
OyI8kJR4e33a3/BGo/5OFb3qI1+Z25EOWdvdnM+r4hdKJZF9ZySlyc640GZHAO2J
wKj5lYp1nBpyNPvYvly174s2MxPju1CRHb7gxcV4LX3vtEY4/MCg7m6P46EUfC6R
C3V7PMMCjZXEQ01MKEmGig47EJKIiecCQUZupJnP7HFKPzeJR9mQZFd68WqzswAM
w9hcCw9hQ9y/kTDVrTVCHs0Q9iTxShfrJyfRJnQ1VcoT+1dieruTa9am9OBKiEw6
CQrPjq9RZZfsZHYr6RlGZHGJyzjrTzrf6EhxwmgaCxWycpvCuV7z76YgAVZI7V4r
qnJmH8dXWdoSA7nZ6sgsb5TRCLT9wu1nNId0DMpAGB1cDGga/55AZtqxdoJLnlkj
y/4wQavIrkfHHuS8c3gzVXPtYmM19CHgcKRFydXD0uGobzfxwYKTKMH+Gviu1NnH
/pGNNY2vVGI=
=Wjhu
-----END PGP SIGNATURE-----

Merge tag 'fscache-20130702' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull FS-Cache updates from David Howells:
 "This contains a number of fixes for various FS-Cache issues plus some
  cleanups.  The commits are, in order:

   1) Provide a system wait_on_atomic_t() and wake_up_atomic_t() sharing
      the bit-wait table (enhancement for #8).

   2) Don't put spin_lock() in a while-condition as spin_lock() may have
      a do {} while(0) wrapper (cleanup).

   3) Symbolically name i_mutex lock classes rather than using numbers in
      CacheFiles (cleanup).

   4) Don't sleep in page release if __GFP_FS is not set (deadlock vs ext4).

   5) Uninline fscache_object_init() (cleanup for #7).

   6) Wrap checks on object state (cleanup for #7).

   7) Simplify the object state machine by separating work states from
      wait states.

   8) Simplify cookie retention by objects (NULL pointer deref fix).

   9) Remove unused list_to_page() macro (cleanup).

  10) Make the remaining-pages counter in the retrieval op atomic
      (assertion failure fix).

  11) Don't use spin_is_locked() in assertions (assertion failure fix)"

* tag 'fscache-20130702' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  FS-Cache: Don't use spin_is_locked() in assertions
  FS-Cache: The retrieval remaining-pages counter needs to be atomic_t
  cachefiles: remove unused macro list_to_page()
  FS-Cache: Simplify cookie retention for fscache_objects, fixing oops
  FS-Cache: Fix object state machine to have separate work and wait states
  FS-Cache: Wrap checks on object state
  FS-Cache: Uninline fscache_object_init()
  FS-Cache: Don't sleep in page release if __GFP_FS is not set
  CacheFiles: name i_mutex lock class explicitly
  fs/fscache: remove spin_lock() from the condition in while()
  Add wait_on_atomic_t() and wake_up_atomic_t()
commit bcd7351e83
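The centrepiece of the series is the cookie retention rework (#8) built on the new wait_on_atomic_t()/wake_up_atomic_t() primitives (#1): users pin the cookie's n_active count, and relinquishment drops the initial reference and sleeps until the count drains to zero. Below is a minimal userspace analogue of that scheme, using C11 atomics and a condition variable in place of the kernel's hashed bit-wait table; all names here are illustrative, not from the kernel tree.

/* Userspace sketch of the n_active pin/drain scheme (illustrative only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_active = 1;        /* starts elevated, like cookie->n_active */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t zeroed = PTHREAD_COND_INITIALIZER;

static int use(void)                   /* cf. fscache_use_cookie() */
{
    int v = atomic_load(&n_active);
    while (v != 0)                     /* inc-not-zero: refuse once teardown began */
        if (atomic_compare_exchange_weak(&n_active, &v, v + 1))
            return 1;
    return 0;
}

static void unuse(void)                /* cf. fscache_unuse_cookie() */
{
    if (atomic_fetch_sub(&n_active, 1) == 1) {   /* we hit zero: wake the waiter */
        pthread_mutex_lock(&lock);
        pthread_cond_broadcast(&zeroed);
        pthread_mutex_unlock(&lock);
    }
}

static void relinquish(void)           /* cf. __fscache_relinquish_cookie() */
{
    pthread_mutex_lock(&lock);
    if (atomic_fetch_sub(&n_active, 1) != 1)     /* drop the initial reference */
        while (atomic_load(&n_active) != 0)      /* wait for users to drain */
            pthread_cond_wait(&zeroed, &lock);
    pthread_mutex_unlock(&lock);
    puts("safe to clear netfs pointers");
}

int main(void)
{
    if (use())
        unuse();
    relinquish();
    return 0;
}

Because the broadcast in unuse() is issued under the same mutex that relinquish() holds across its check-then-wait, a wakeup cannot be lost; the kernel gets the same guarantee from prepare_to_wait()/finish_wait().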
@@ -13,8 +13,6 @@
 #include <linux/mount.h>
 #include "internal.h"
 
-#define list_to_page(head)	(list_entry((head)->prev, struct page, lru))
-
 struct cachefiles_lookup_data {
 	struct cachefiles_xattr *auxdata;	/* auxiliary data */
 	char *key;				/* key path */
@@ -212,20 +210,29 @@ static void cachefiles_update_object(struct fscache_object *_object)
 	object = container_of(_object, struct cachefiles_object, fscache);
 	cache = container_of(object->fscache.cache, struct cachefiles_cache,
 			     cache);
+
+	if (!fscache_use_cookie(_object)) {
+		_leave(" [relinq]");
+		return;
+	}
+
 	cookie = object->fscache.cookie;
 
 	if (!cookie->def->get_aux) {
+		fscache_unuse_cookie(_object);
 		_leave(" [no aux]");
 		return;
 	}
 
 	auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
 	if (!auxdata) {
+		fscache_unuse_cookie(_object);
 		_leave(" [nomem]");
 		return;
 	}
 
 	auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511);
+	fscache_unuse_cookie(_object);
 	ASSERTCMP(auxlen, <, 511);
 
 	auxdata->len = auxlen + 1;
@@ -263,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
 #endif
 
 	/* delete retired objects */
-	if (object->fscache.state == FSCACHE_OBJECT_RECYCLING &&
+	if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
 	    _object != cache->cache.fsdef
 	    ) {
 		_debug("- retire object OBJ%x", object->fscache.debug_id);
@@ -38,7 +38,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
 	printk(KERN_ERR "%sobject: OBJ%x\n",
 	       prefix, object->fscache.debug_id);
 	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
-	       prefix, fscache_object_states[object->fscache.state],
+	       prefix, object->fscache.state->name,
 	       object->fscache.flags, work_busy(&object->fscache.work),
 	       object->fscache.events, object->fscache.event_mask);
 	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
@@ -127,10 +127,10 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
 found_dentry:
 	kdebug("preemptive burial: OBJ%x [%s] %p",
 	       object->fscache.debug_id,
-	       fscache_object_states[object->fscache.state],
+	       object->fscache.state->name,
 	       dentry);
 
-	if (object->fscache.state < FSCACHE_OBJECT_DYING) {
+	if (fscache_object_is_live(&object->fscache)) {
 		printk(KERN_ERR "\n");
 		printk(KERN_ERR "CacheFiles: Error:"
 		       " Can't preemptively bury live object\n");
@@ -192,7 +192,7 @@ try_again:
 	/* an old object from a previous incarnation is hogging the slot - we
 	 * need to wait for it to be destroyed */
 wait_for_old_object:
-	if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
+	if (fscache_object_is_live(&object->fscache)) {
 		printk(KERN_ERR "\n");
 		printk(KERN_ERR "CacheFiles: Error:"
 		       " Unexpected object collision\n");
@@ -836,7 +836,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
 	//       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
 
 	/* look up the victim */
-	mutex_lock_nested(&dir->d_inode->i_mutex, 1);
+	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 
 	start = jiffies;
 	victim = lookup_one_len(filename, dir, strlen(filename));
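This is cleanup #3: the bare lockdep subclass '1' becomes the named I_MUTEX_PARENT, which documents why taking a second i_mutex is legal here. I_MUTEX_PARENT belongs to the kernel's enum inode_i_mutex_lock_class in include/linux/fs.h; the abridged sketch below illustrates the pattern rather than reproducing the full enum.

/* Simplified illustration of the named-subclass pattern from linux/fs.h. */
enum inode_i_mutex_lock_class {
    I_MUTEX_NORMAL,    /* subclass 0: ordinary inode lock */
    I_MUTEX_PARENT,    /* subclass 1: parent directory of a victim */
    I_MUTEX_CHILD,     /* subclass 2: child taken after its parent */
};

/* mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT) now states the
 * locking order explicitly, where the old literal '1' did not. */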
@@ -109,13 +109,12 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
 	struct dentry *dentry = object->dentry;
 	int ret;
 
-	ASSERT(object->fscache.cookie);
 	ASSERT(dentry);
 
 	_enter("%p,#%d", object, auxdata->len);
 
 	/* attempt to install the cache metadata directly */
-	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+	_debug("SET #%u", auxdata->len);
 
 	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
 			   &auxdata->type, auxdata->len,
@@ -138,13 +137,12 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
 	struct dentry *dentry = object->dentry;
 	int ret;
 
-	ASSERT(object->fscache.cookie);
 	ASSERT(dentry);
 
 	_enter("%p,#%d", object, auxdata->len);
 
 	/* attempt to install the cache metadata directly */
-	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+	_debug("SET #%u", auxdata->len);
 
 	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
 			   &auxdata->type, auxdata->len,
@@ -115,7 +115,7 @@ struct fscache_cache *fscache_select_cache_for_object(
 				   struct fscache_object, cookie_link);
 
 		cache = object->cache;
-		if (object->state >= FSCACHE_OBJECT_DYING ||
+		if (fscache_object_is_dying(object) ||
 		    test_bit(FSCACHE_IOERROR, &cache->flags))
 			cache = NULL;
 
@@ -224,8 +224,10 @@ int fscache_add_cache(struct fscache_cache *cache,
 	BUG_ON(!ifsdef);
 
 	cache->flags = 0;
-	ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
-	ifsdef->state = FSCACHE_OBJECT_ACTIVE;
+	ifsdef->event_mask =
+		((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
+		~(1 << FSCACHE_OBJECT_EV_CLEARED);
+	__set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);
 
 	if (!tagname)
 		tagname = cache->identifier;
@@ -330,25 +332,25 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache,
 {
 	struct fscache_object *object;
 
-	spin_lock(&cache->object_list_lock);
-
 	while (!list_empty(&cache->object_list)) {
-		object = list_entry(cache->object_list.next,
-				    struct fscache_object, cache_link);
-		list_move_tail(&object->cache_link, dying_objects);
-
-		_debug("withdraw %p", object->cookie);
-
-		spin_lock(&object->lock);
-		spin_unlock(&cache->object_list_lock);
-		fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW);
-		spin_unlock(&object->lock);
+		spin_lock(&cache->object_list_lock);
 
-		cond_resched();
-		spin_lock(&cache->object_list_lock);
-	}
+		if (!list_empty(&cache->object_list)) {
+			object = list_entry(cache->object_list.next,
+					    struct fscache_object, cache_link);
+			list_move_tail(&object->cache_link, dying_objects);
+
+			_debug("withdraw %p", object->cookie);
+
+			/* This must be done under object_list_lock to prevent
+			 * a race with fscache_drop_object().
+			 */
+			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+		}
 
-	spin_unlock(&cache->object_list_lock);
+		spin_unlock(&cache->object_list_lock);
+		cond_resched();
+	}
 }
 
 /**
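The reworked withdrawal loop never holds object_list_lock across cond_resched() and re-checks list emptiness each time round. A userspace sketch of the same shape, with illustrative names only:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void kill_node(struct node *n)
{
    printf("withdraw %p\n", (void *)n);
}

static void withdraw_all(void)
{
    for (;;) {
        struct node *n = NULL;

        pthread_mutex_lock(&list_lock);
        if (head) {                 /* re-check emptiness under the lock */
            n = head;
            head = n->next;         /* detach one item */
            kill_node(n);           /* done under the lock, cf. the race
                                     * note against fscache_drop_object() */
        }
        pthread_mutex_unlock(&list_lock);

        if (!n)
            break;
        sched_yield();              /* cf. cond_resched(): lock not held */
    }
}

int main(void)
{
    struct node a = { 0 }, b = { &a };

    head = &b;
    withdraw_all();
    return 0;
}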
@@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	atomic_set(&cookie->usage, 1);
 	atomic_set(&cookie->n_children, 0);
 
+	/* We keep the active count elevated until relinquishment to prevent an
+	 * attempt to wake up every time the object operations queue quiesces.
+	 */
+	atomic_set(&cookie->n_active, 1);
+
 	atomic_inc(&parent->usage);
 	atomic_inc(&parent->n_children);
 
@@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
 	cookie->flags =
 		(1 << FSCACHE_COOKIE_LOOKING_UP) |
-		(1 << FSCACHE_COOKIE_CREATING) |
 		(1 << FSCACHE_COOKIE_NO_DATA_YET);
 
 	/* ask the cache to allocate objects for this cookie and its parent
@@ -205,7 +209,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
 	/* initiate the process of looking up all the objects in the chain
 	 * (done by fscache_initialise_object()) */
-	fscache_enqueue_object(object);
+	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);
 
 	spin_unlock(&cookie->lock);
 
@@ -285,7 +289,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 
 object_already_extant:
 	ret = -ENOBUFS;
-	if (object->state >= FSCACHE_OBJECT_DYING) {
+	if (fscache_object_is_dead(object)) {
 		spin_unlock(&cookie->lock);
 		goto error;
 	}
@@ -321,7 +325,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 	ret = -EEXIST;
 	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
 		if (p->cache == object->cache) {
-			if (p->state >= FSCACHE_OBJECT_DYING)
+			if (fscache_object_is_dying(p))
 				ret = -ENOBUFS;
 			goto cant_attach_object;
 		}
@@ -332,7 +336,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 	hlist_for_each_entry(p, &cookie->parent->backing_objects,
 			     cookie_link) {
 		if (p->cache == object->cache) {
-			if (p->state >= FSCACHE_OBJECT_DYING) {
+			if (fscache_object_is_dying(p)) {
 				ret = -ENOBUFS;
 				spin_unlock(&cookie->parent->lock);
 				goto cant_attach_object;
@@ -400,7 +404,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
 		object = hlist_entry(cookie->backing_objects.first,
 				     struct fscache_object,
 				     cookie_link);
-		if (object->state < FSCACHE_OBJECT_DYING)
+		if (fscache_object_is_live(object))
 			fscache_raise_event(
 				object, FSCACHE_OBJECT_EV_INVALIDATE);
 	}
@@ -467,9 +471,7 @@ EXPORT_SYMBOL(__fscache_update_cookie);
  */
 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 {
-	struct fscache_cache *cache;
 	struct fscache_object *object;
-	unsigned long event;
 
 	fscache_stat(&fscache_n_relinquishes);
 	if (retire)
@@ -481,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 		return;
 	}
 
-	_enter("%p{%s,%p},%d",
-	       cookie, cookie->def->name, cookie->netfs_data, retire);
+	_enter("%p{%s,%p,%d},%d",
+	       cookie, cookie->def->name, cookie->netfs_data,
+	       atomic_read(&cookie->n_active), retire);
+
+	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
 
 	if (atomic_read(&cookie->n_children) != 0) {
 		printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
@@ -490,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 		BUG();
 	}
 
-	/* wait for the cookie to finish being instantiated (or to fail) */
-	if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
-		fscache_stat(&fscache_n_relinquishes_waitcrt);
-		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-	}
-
-	event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
+	/* No further netfs-accessing operations on this cookie permitted */
+	set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+	if (retire)
+		set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
 
-try_again:
 	spin_lock(&cookie->lock);
-
-	/* break links with all the active objects */
-	while (!hlist_empty(&cookie->backing_objects)) {
-		int n_reads;
-		object = hlist_entry(cookie->backing_objects.first,
-				     struct fscache_object,
-				     cookie_link);
-
-		_debug("RELEASE OBJ%x", object->debug_id);
-
-		set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
-		n_reads = atomic_read(&object->n_reads);
-		if (n_reads) {
-			int n_ops = object->n_ops;
-			int n_in_progress = object->n_in_progress;
-			spin_unlock(&cookie->lock);
-			printk(KERN_ERR "FS-Cache:"
-			       " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
-			       cookie->def->name,
-			       n_reads, n_ops, n_in_progress);
-			wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
-				    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-			printk("Wait finished\n");
-			goto try_again;
-		}
-
-		/* detach each cache object from the object cookie */
-		spin_lock(&object->lock);
-		hlist_del_init(&object->cookie_link);
-
-		cache = object->cache;
-		object->cookie = NULL;
-		fscache_raise_event(object, event);
-		spin_unlock(&object->lock);
-
-		if (atomic_dec_and_test(&cookie->usage))
-			/* the cookie refcount shouldn't be reduced to 0 yet */
-			BUG();
+	hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
+		fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 	}
+	spin_unlock(&cookie->lock);
 
-	/* detach pointers back to the netfs */
-	spin_lock(&cookie->lock);
+	/* Wait for cessation of activity requiring access to the netfs (when
+	 * n_active reaches 0).
+	 */
+	if (!atomic_dec_and_test(&cookie->n_active))
+		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
+				 TASK_UNINTERRUPTIBLE);
+
+	/* Clear pointers back to the netfs */
 	cookie->netfs_data = NULL;
 	cookie->def = NULL;
-	spin_unlock(&cookie->lock);
+	BUG_ON(cookie->stores.rnode);
 
 	if (cookie->parent) {
 		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
@@ -553,7 +524,7 @@ try_again:
 		atomic_dec(&cookie->parent->n_children);
 	}
 
-	/* finally dispose of the cookie */
+	/* Dispose of the netfs's link to the cookie */
 	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
 	fscache_cookie_put(cookie);
 
@@ -55,6 +55,7 @@ static struct fscache_cookie_def fscache_fsdef_index_def = {
 
 struct fscache_cookie fscache_fsdef_index = {
 	.usage		= ATOMIC_INIT(1),
+	.n_active	= ATOMIC_INIT(1),
 	.lock		= __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
 	.backing_objects = HLIST_HEAD_INIT,
 	.def		= &fscache_fsdef_index_def,
@@ -93,14 +93,11 @@ static inline bool fscache_object_congested(void)
 
 extern int fscache_wait_bit(void *);
 extern int fscache_wait_bit_interruptible(void *);
+extern int fscache_wait_atomic_t(atomic_t *);
 
 /*
  * object.c
  */
-extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
-
-extern void fscache_withdrawing_object(struct fscache_cache *,
-				       struct fscache_object *);
 extern void fscache_enqueue_object(struct fscache_object *);
 
 /*
@@ -110,8 +107,10 @@ extern void fscache_enqueue_object(struct fscache_object *);
 extern const struct file_operations fscache_objlist_fops;
 
 extern void fscache_objlist_add(struct fscache_object *);
+extern void fscache_objlist_remove(struct fscache_object *);
 #else
 #define fscache_objlist_add(object) do {} while(0)
+#define fscache_objlist_remove(object) do {} while(0)
 #endif
 
 /*
@@ -291,6 +290,10 @@ static inline void fscache_raise_event(struct fscache_object *object,
 					unsigned event)
 {
 	BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);
+#if 0
+	printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n",
+	       object->debug_id, object->event_mask, (1 << event));
+#endif
 	if (!test_and_set_bit(event, &object->events) &&
 	    test_bit(event, &object->event_mask))
 		fscache_enqueue_object(object);
@@ -205,7 +205,6 @@ int fscache_wait_bit(void *flags)
 	schedule();
 	return 0;
 }
-EXPORT_SYMBOL(fscache_wait_bit);
 
 /*
  * wait_on_bit() sleep function for interruptible waiting
@@ -215,4 +214,12 @@ int fscache_wait_bit_interruptible(void *flags)
 	schedule();
 	return signal_pending(current);
 }
-EXPORT_SYMBOL(fscache_wait_bit_interruptible);
+
+/*
+ * wait_on_atomic_t() sleep function for uninterruptible waiting
+ */
+int fscache_wait_atomic_t(atomic_t *p)
+{
+	schedule();
+	return 0;
+}
@@ -40,6 +40,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 	/* initialise the primary index cookie */
 	atomic_set(&netfs->primary_index->usage, 1);
 	atomic_set(&netfs->primary_index->n_children, 0);
+	atomic_set(&netfs->primary_index->n_active, 1);
 
 	netfs->primary_index->def		= &fscache_fsdef_netfs_def;
 	netfs->primary_index->parent		= &fscache_fsdef_index;
@@ -70,13 +70,10 @@ void fscache_objlist_add(struct fscache_object *obj)
 	write_unlock(&fscache_object_list_lock);
 }
 
-/**
- * fscache_object_destroy - Note that a cache object is about to be destroyed
- * @object: The object to be destroyed
- *
- * Note the imminent destruction and deallocation of a cache object record.
+/*
+ * Remove an object from the object list.
  */
-void fscache_object_destroy(struct fscache_object *obj)
+void fscache_objlist_remove(struct fscache_object *obj)
 {
 	write_lock(&fscache_object_list_lock);
 
@@ -85,7 +82,6 @@ void fscache_object_destroy(struct fscache_object *obj)
 
 	write_unlock(&fscache_object_list_lock);
 }
-EXPORT_SYMBOL(fscache_object_destroy);
 
 /*
  * find the object in the tree on or after the specified index
@@ -166,15 +162,14 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 {
 	struct fscache_objlist_data *data = m->private;
 	struct fscache_object *obj = v;
+	struct fscache_cookie *cookie;
 	unsigned long config = data->config;
-	uint16_t keylen, auxlen;
 	char _type[3], *type;
-	bool no_cookie;
 	u8 *buf = data->buf, *p;
 
 	if ((unsigned long) v == 1) {
 		seq_puts(m, "OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS"
-			 " EM EV F S"
+			 " EM EV FL S"
 			 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
 		if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
 			      FSCACHE_OBJLIST_CONFIG_AUX))
@@ -193,7 +188,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 
 	if ((unsigned long) v == 2) {
 		seq_puts(m, "======== ======== ==== ===== === === === == ====="
-			 " == == = ="
+			 " == == == ="
 			 " | ================ == == ================");
 		if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
 			      FSCACHE_OBJLIST_CONFIG_AUX))
@@ -216,10 +211,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 		}					\
 	} while(0)
 
+	cookie = obj->cookie;
 	if (~config) {
-		FILTER(obj->cookie,
+		FILTER(cookie->def,
 		       COOKIE, NOCOOKIE);
-		FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
+		FILTER(fscache_object_is_active(obj) ||
 		       obj->n_ops != 0 ||
 		       obj->n_obj_ops != 0 ||
 		       obj->flags ||
@@ -235,10 +231,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 	}
 
 	seq_printf(m,
-		   "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ",
+		   "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ",
 		   obj->debug_id,
 		   obj->parent ? obj->parent->debug_id : -1,
-		   fscache_object_states_short[obj->state],
+		   obj->state->short_name,
 		   obj->n_children,
 		   obj->n_ops,
 		   obj->n_obj_ops,
@@ -250,48 +246,40 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 		   obj->flags,
 		   work_busy(&obj->work));
 
-	no_cookie = true;
-	keylen = auxlen = 0;
-	if (obj->cookie) {
-		spin_lock(&obj->lock);
-		if (obj->cookie) {
-			switch (obj->cookie->def->type) {
-			case 0:
-				type = "IX";
-				break;
-			case 1:
-				type = "DT";
-				break;
-			default:
-				sprintf(_type, "%02u",
-					obj->cookie->def->type);
-				type = _type;
-				break;
-			}
+	if (fscache_use_cookie(obj)) {
+		uint16_t keylen = 0, auxlen = 0;
 
-			seq_printf(m, "%-16s %s %2lx %16p",
-				   obj->cookie->def->name,
-				   type,
-				   obj->cookie->flags,
-				   obj->cookie->netfs_data);
+		switch (cookie->def->type) {
+		case 0:
+			type = "IX";
+			break;
+		case 1:
+			type = "DT";
+			break;
+		default:
+			sprintf(_type, "%02u", cookie->def->type);
+			type = _type;
+			break;
+		}
 
-			if (obj->cookie->def->get_key &&
-			    config & FSCACHE_OBJLIST_CONFIG_KEY)
-				keylen = obj->cookie->def->get_key(
-					obj->cookie->netfs_data,
-					buf, 400);
+		seq_printf(m, "%-16s %s %2lx %16p",
+			   cookie->def->name,
+			   type,
+			   cookie->flags,
+			   cookie->netfs_data);
 
-			if (obj->cookie->def->get_aux &&
-			    config & FSCACHE_OBJLIST_CONFIG_AUX)
-				auxlen = obj->cookie->def->get_aux(
-					obj->cookie->netfs_data,
-					buf + keylen, 512 - keylen);
+		if (cookie->def->get_key &&
+		    config & FSCACHE_OBJLIST_CONFIG_KEY)
+			keylen = cookie->def->get_key(cookie->netfs_data,
+						      buf, 400);
 
-			no_cookie = false;
-		}
-		spin_unlock(&obj->lock);
+		if (cookie->def->get_aux &&
+		    config & FSCACHE_OBJLIST_CONFIG_AUX)
+			auxlen = cookie->def->get_aux(cookie->netfs_data,
+						      buf + keylen, 512 - keylen);
+		fscache_unuse_cookie(obj);
 
-		if (!no_cookie && (keylen > 0 || auxlen > 0)) {
+		if (keylen > 0 || auxlen > 0) {
 			seq_printf(m, " ");
 			for (p = buf; keylen > 0; keylen--)
 				seq_printf(m, "%02x", *p++);
@@ -302,12 +290,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 				seq_printf(m, "%02x", *p++);
 			}
 		}
-	}
 
-	if (no_cookie)
-		seq_printf(m, "<no_cookie>\n");
-	else
-		seq_printf(m, "\n");
+		seq_printf(m, "\n");
+	} else {
+		seq_printf(m, "<no_netfs>\n");
+	}
 	return 0;
 }
fs/fscache/object.c: 1112 lines changed (diff suppressed because it is too large)
@@ -35,7 +35,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 
 	ASSERT(list_empty(&op->pend_link));
 	ASSERT(op->processor != NULL);
-	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+	ASSERT(fscache_object_is_available(op->object));
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
 
@@ -119,7 +119,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 		/* need to issue a new write op after this */
 		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 		ret = 0;
-	} else if (object->state == FSCACHE_OBJECT_CREATING) {
+	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
 		op->object = object;
 		object->n_ops++;
 		object->n_exclusive++;	/* reads and writes must wait */
@@ -144,7 +144,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
  */
 static void fscache_report_unexpected_submission(struct fscache_object *object,
 						 struct fscache_operation *op,
-						 unsigned long ostate)
+						 const struct fscache_state *ostate)
 {
 	static bool once_only;
 	struct fscache_operation *p;
@@ -155,11 +155,8 @@ static void fscache_report_unexpected_submission(struct fscache_object *object,
 	once_only = true;
 
 	kdebug("unexpected submission OP%x [OBJ%x %s]",
-	       op->debug_id, object->debug_id,
-	       fscache_object_states[object->state]);
-	kdebug("objstate=%s [%s]",
-	       fscache_object_states[object->state],
-	       fscache_object_states[ostate]);
+	       op->debug_id, object->debug_id, object->state->name);
+	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
 	kdebug("objflags=%lx", object->flags);
 	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
 	kdebug("ops=%u inp=%u exc=%u",
@@ -190,7 +187,7 @@ static void fscache_report_unexpected_submission(struct fscache_object *object,
 int fscache_submit_op(struct fscache_object *object,
 		      struct fscache_operation *op)
 {
-	unsigned long ostate;
+	const struct fscache_state *ostate;
 	int ret;
 
 	_enter("{OBJ%x OP%x},{%u}",
@@ -226,16 +223,14 @@ int fscache_submit_op(struct fscache_object *object,
 			fscache_run_op(object, op);
 		}
 		ret = 0;
-	} else if (object->state == FSCACHE_OBJECT_CREATING) {
+	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
 		op->object = object;
 		object->n_ops++;
 		atomic_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
-	} else if (object->state == FSCACHE_OBJECT_DYING ||
-		   object->state == FSCACHE_OBJECT_LC_DYING ||
-		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
+	} else if (fscache_object_is_dying(object)) {
 		fscache_stat(&fscache_n_op_rejected);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 		ret = -ENOBUFS;
@@ -265,8 +260,8 @@ void fscache_abort_object(struct fscache_object *object)
 }
 
 /*
- * jump start the operation processing on an object
- * - caller must hold object->lock
+ * Jump start the operation processing on an object.  The caller must hold
+ * object->lock.
  */
 void fscache_start_operations(struct fscache_object *object)
 {
@@ -428,14 +423,10 @@ void fscache_put_operation(struct fscache_operation *op)
 
 	object = op->object;
 
-	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
-		if (atomic_dec_and_test(&object->n_reads)) {
-			clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
-				  &object->cookie->flags);
-			wake_up_bit(&object->cookie->flags,
-				    FSCACHE_COOKIE_WAITING_ON_READS);
-		}
-	}
+	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+		atomic_dec(&object->n_reads);
+	if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
+		fscache_unuse_cookie(object);
 
 	/* now... we may get called with the object spinlock held, so we
 	 * complete the cleanup here only if we can immediately acquire the
@@ -109,7 +109,7 @@ page_busy:
 	 * allocator as the work threads writing to the cache may all end up
 	 * sleeping on memory allocation, so we may need to impose a timeout
 	 * too. */
-	if (!(gfp & __GFP_WAIT)) {
+	if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
 		fscache_stat(&fscache_n_store_vmscan_busy);
 		return false;
 	}
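The added __GFP_FS test is fix #4: a page-release path invoked from memory reclaim must not sleep unless the allocation context allows both waiting and filesystem re-entry, or it can deadlock against a filesystem (ext4 in the report) that is itself allocating memory. A standalone model of the guard follows; the flag values are illustrative, the real ones live in linux/gfp.h.

#include <stdbool.h>
#include <stdio.h>

#define __GFP_WAIT  0x10u   /* illustrative values, not the kernel's */
#define __GFP_FS    0x80u

static bool example_release_page(unsigned gfp)
{
    if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS))
        return false;   /* constrained reclaim context: refuse, don't sleep */
    /* ...safe to wait for the page's pending store here... */
    return true;
}

int main(void)
{
    printf("GFP_NOFS-like context  -> %d\n", example_release_page(__GFP_WAIT));
    printf("GFP_KERNEL-like context -> %d\n",
           example_release_page(__GFP_WAIT | __GFP_FS));
    return 0;
}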
@@ -163,10 +163,12 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 
 	fscache_stat(&fscache_n_attr_changed_calls);
 
-	if (fscache_object_is_active(object)) {
+	if (fscache_object_is_active(object) &&
+	    fscache_use_cookie(object)) {
 		fscache_stat(&fscache_n_cop_attr_changed);
 		ret = object->cache->ops->attr_changed(object);
 		fscache_stat_d(&fscache_n_cop_attr_changed);
+		fscache_unuse_cookie(object);
 		if (ret < 0)
 			fscache_abort_object(object);
 	}
@@ -233,7 +235,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
 
 	_enter("{OP%x}", op->op.debug_id);
 
-	ASSERTCMP(op->n_pages, ==, 0);
+	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
 
 	fscache_hist(fscache_retrieval_histogram, op->start_time);
 	if (op->context)
@@ -246,6 +248,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
  * allocate a retrieval op
  */
 static struct fscache_retrieval *fscache_alloc_retrieval(
+	struct fscache_cookie *cookie,
 	struct address_space *mapping,
 	fscache_rw_complete_t end_io_func,
 	void *context)
@@ -260,7 +263,10 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 	}
 
 	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
-	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
+	atomic_inc(&cookie->n_active);
+	op->op.flags	= FSCACHE_OP_MYTHREAD |
+		(1UL << FSCACHE_OP_WAITING) |
+		(1UL << FSCACHE_OP_UNUSE_COOKIE);
 	op->mapping	= mapping;
 	op->end_io_func	= end_io_func;
 	op->context	= context;
@@ -310,7 +316,7 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
 	struct fscache_retrieval *op =
 		container_of(_op, struct fscache_retrieval, op);
 
-	op->n_pages = 0;
+	atomic_set(&op->n_pages, 0);
 }
 
 /*
@@ -394,12 +400,13 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
 		return -ERESTARTSYS;
 
-	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
+	op = fscache_alloc_retrieval(cookie, page->mapping,
+				     end_io_func,context);
 	if (!op) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
-	op->n_pages = 1;
+	atomic_set(&op->n_pages, 1);
 
 	spin_lock(&cookie->lock);
 
@@ -408,7 +415,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 		object = hlist_entry(cookie->backing_objects.first,
 				     struct fscache_object, cookie_link);
 
-		ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
+		ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
 
 		atomic_inc(&object->n_reads);
 		__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
@@ -465,6 +472,7 @@ nobufs_unlock_dec:
 	atomic_dec(&object->n_reads);
 nobufs_unlock:
 	spin_unlock(&cookie->lock);
+	atomic_dec(&cookie->n_active);
 	kfree(op);
 nobufs:
 	fscache_stat(&fscache_n_retrievals_nobufs);
@@ -522,10 +530,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
 		return -ERESTARTSYS;
 
-	op = fscache_alloc_retrieval(mapping, end_io_func, context);
+	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
 	if (!op)
 		return -ENOMEM;
-	op->n_pages = *nr_pages;
+	atomic_set(&op->n_pages, *nr_pages);
 
 	spin_lock(&cookie->lock);
 
@@ -589,6 +597,7 @@ nobufs_unlock_dec:
 	atomic_dec(&object->n_reads);
 nobufs_unlock:
 	spin_unlock(&cookie->lock);
+	atomic_dec(&cookie->n_active);
 	kfree(op);
 nobufs:
 	fscache_stat(&fscache_n_retrievals_nobufs);
@@ -631,10 +640,10 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
 		return -ERESTARTSYS;
 
-	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
+	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
 	if (!op)
 		return -ENOMEM;
-	op->n_pages = 1;
+	atomic_set(&op->n_pages, 1);
 
 	spin_lock(&cookie->lock);
 
@@ -675,6 +684,7 @@ error:
 
 nobufs_unlock:
 	spin_unlock(&cookie->lock);
+	atomic_dec(&cookie->n_active);
 	kfree(op);
 nobufs:
 	fscache_stat(&fscache_n_allocs_nobufs);
@@ -729,8 +739,9 @@ static void fscache_write_op(struct fscache_operation *_op)
 		 */
 		spin_unlock(&object->lock);
 		fscache_op_complete(&op->op, false);
-		_leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
-		       _op->flags, _op->state, object->state, object->flags);
+		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
+		       _op->flags, _op->state, object->state->short_name,
+		       object->flags);
 		return;
 	}
 
@@ -796,11 +807,16 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
 
 	_enter("");
 
-	while (spin_lock(&cookie->stores_lock),
-	       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-					      ARRAY_SIZE(results),
-					      FSCACHE_COOKIE_PENDING_TAG),
-	       n > 0) {
+	for (;;) {
+		spin_lock(&cookie->stores_lock);
+		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
+					       ARRAY_SIZE(results),
+					       FSCACHE_COOKIE_PENDING_TAG);
+		if (n == 0) {
+			spin_unlock(&cookie->stores_lock);
+			break;
+		}
+
 		for (i = n - 1; i >= 0; i--) {
 			page = results[i];
 			radix_tree_delete(&cookie->stores, page->index);
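This rewrite is cleanup #2: spin_lock() may be implemented as a statement-like macro wrapped in do {} while (0), which is not an expression and therefore cannot legally sit inside a while-condition comma expression. A self-contained demonstration of the pitfall and of the for (;;) shape used in the fix (LOCK() is an invented stand-in):

#include <stdio.h>

#define LOCK() do { puts("locked"); } while (0)

int main(void)
{
    int n = 3;

#if 0   /* does not compile: a do {} while (0) block is not an expression */
    while (LOCK(), n-- > 0)
        puts("body");
#endif
    for (;;) {          /* the fixed shape: lock first, then test and break */
        LOCK();
        if (n-- <= 0)
            break;
        puts("body");
    }
    return 0;
}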
@@ -812,7 +828,6 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
 			page_cache_release(results[i]);
 	}
 
-	spin_unlock(&cookie->stores_lock);
 	_leave("");
 }
 
@@ -829,14 +844,12 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
  * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
  *     set)
  *
- *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
- *	    fill op)
+ *	(a) no writes yet
  *
  *	(b) writes deferred till post-creation (mark page for writing and
  *	    return immediately)
  *
  * (2) negative lookup, object created, initial fill being made from netfs
- *     (FSCACHE_COOKIE_INITIAL_FILL is set)
  *
  *	(a) fill point not yet reached this page (mark page for writing and
  *	    return)
@@ -873,7 +886,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
 	fscache_operation_init(&op->op, fscache_write_op,
 			       fscache_release_write_op);
-	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
+	op->op.flags = FSCACHE_OP_ASYNC |
+		(1 << FSCACHE_OP_WAITING) |
+		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
 	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
@@ -919,6 +934,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
 	op->store_limit = object->store_limit;
 
+	atomic_inc(&cookie->n_active);
 	if (fscache_submit_op(object, &op->op) < 0)
 		goto submit_failed;
 
@@ -945,6 +961,7 @@ already_pending:
 	return 0;
 
 submit_failed:
+	atomic_dec(&cookie->n_active);
 	spin_lock(&cookie->stores_lock);
 	radix_tree_delete(&cookie->stores, page->index);
 	spin_unlock(&cookie->stores_lock);
@@ -97,7 +97,8 @@ struct fscache_operation {
 #define FSCACHE_OP_WAITING	4	/* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE	5	/* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEC_READ_CNT	6	/* decrement object->n_reads on destruction */
-#define FSCACHE_OP_KEEP_FLAGS	0x0070	/* flags to keep when repurposing an op */
+#define FSCACHE_OP_UNUSE_COOKIE	7	/* call fscache_unuse_cookie() on completion */
+#define FSCACHE_OP_KEEP_FLAGS	0x00f0	/* flags to keep when repurposing an op */
 
 	enum fscache_operation_state state;
 	atomic_t		usage;
@@ -150,7 +151,7 @@ struct fscache_retrieval {
 	void			*context;	/* netfs read context (pinned) */
 	struct list_head	to_do;		/* list of things to be done by the backend */
 	unsigned long		start_time;	/* time at which retrieval started */
-	unsigned		n_pages;	/* number of pages to be retrieved */
+	atomic_t		n_pages;	/* number of pages to be retrieved */
 };
 
 typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
@@ -194,15 +195,14 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
 					      int n_pages)
 {
-	op->n_pages -= n_pages;
-	if (op->n_pages <= 0)
+	atomic_sub(n_pages, &op->n_pages);
+	if (atomic_read(&op->n_pages) <= 0)
 		fscache_op_complete(&op->op, true);
 }
 
 /**
  * fscache_put_retrieval - Drop a reference to a retrieval operation
  * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
  *
  * Drop a reference to a retrieval operation.
  */
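This is the heart of fix #10: fscache_retrieval_complete() can run from several contexts at once, so the plain 'op->n_pages -= n_pages' could lose updates and trip the ASSERTCMP in fscache_release_retrieval_op(). The userspace demonstration below (illustrative, not kernel code) shows the lost-update race that an atomic read-modify-write avoids; it usually prints different totals for the two counters.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define PER_THREAD 1000000

static unsigned plain_pages = 2 * PER_THREAD;
static atomic_int atomic_pages = 2 * PER_THREAD;

static void *complete(void *arg)
{
    (void)arg;
    for (int i = 0; i < PER_THREAD; i++) {
        plain_pages -= 1;                       /* racy: load, subtract, store */
        atomic_fetch_sub(&atomic_pages, 1);     /* safe read-modify-write */
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, complete, NULL);
    pthread_create(&b, NULL, complete, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);

    /* the plain counter rarely reaches 0; the atomic one always does */
    printf("plain=%u atomic=%d\n", plain_pages, atomic_load(&atomic_pages));
    return 0;
}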
@@ -314,6 +314,7 @@ struct fscache_cache_ops {
 struct fscache_cookie {
 	atomic_t		usage;		/* number of users of this cookie */
 	atomic_t		n_children;	/* number of children of this cookie */
+	atomic_t		n_active;	/* number of active users of netfs ptrs */
 	spinlock_t		lock;
 	spinlock_t		stores_lock;	/* lock on page store tree */
 	struct hlist_head	backing_objects; /* object(s) backing this file/index */
@@ -326,13 +327,11 @@ struct fscache_cookie {
 
 	unsigned long		flags;
 #define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_CREATING		1	/* T if non-index object being created still */
-#define FSCACHE_COOKIE_NO_DATA_YET	2	/* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_PENDING_FILL	3	/* T if pending initial fill on object */
-#define FSCACHE_COOKIE_FILLING		4	/* T if filling object incrementally */
-#define FSCACHE_COOKIE_UNAVAILABLE	5	/* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_WAITING_ON_READS	6	/* T if cookie is waiting on reads */
-#define FSCACHE_COOKIE_INVALIDATING	7	/* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_NO_DATA_YET	1	/* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE	2	/* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING	3	/* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED	4	/* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED		5	/* T if cookie was retired */
 };
 
 extern struct fscache_cookie fscache_fsdef_index;
@@ -341,45 +340,40 @@ extern struct fscache_cookie fscache_fsdef_index;
  * Event list for fscache_object::{event_mask,events}
  */
 enum {
-	FSCACHE_OBJECT_EV_REQUEUE,	/* T if object should be requeued */
+	FSCACHE_OBJECT_EV_NEW_CHILD,	/* T if object has a new child */
+	FSCACHE_OBJECT_EV_PARENT_READY,	/* T if object's parent is ready */
 	FSCACHE_OBJECT_EV_UPDATE,	/* T if object should be updated */
 	FSCACHE_OBJECT_EV_INVALIDATE,	/* T if cache requested object invalidation */
 	FSCACHE_OBJECT_EV_CLEARED,	/* T if accessors all gone */
 	FSCACHE_OBJECT_EV_ERROR,	/* T if fatal error occurred during processing */
-	FSCACHE_OBJECT_EV_RELEASE,	/* T if netfs requested object release */
-	FSCACHE_OBJECT_EV_RETIRE,	/* T if netfs requested object retirement */
-	FSCACHE_OBJECT_EV_WITHDRAW,	/* T if cache requested object withdrawal */
+	FSCACHE_OBJECT_EV_KILL,		/* T if netfs relinquished or cache withdrew object */
 	NR_FSCACHE_OBJECT_EVENTS
 };
 
+#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+
 /*
+ * States for object state machine.
+ */
+struct fscache_transition {
+	unsigned long events;
+	const struct fscache_state *transit_to;
+};
+
+struct fscache_state {
+	char name[24];
+	char short_name[8];
+	const struct fscache_state *(*work)(struct fscache_object *object,
+					    int event);
+	const struct fscache_transition transitions[];
+};
+
+/*
  * on-disk cache file or index handle
  */
 struct fscache_object {
-	enum fscache_object_state {
-		FSCACHE_OBJECT_INIT,		/* object in initial unbound state */
-		FSCACHE_OBJECT_LOOKING_UP,	/* looking up object */
-		FSCACHE_OBJECT_CREATING,	/* creating object */
-
-		/* active states */
-		FSCACHE_OBJECT_AVAILABLE,	/* cleaning up object after creation */
-		FSCACHE_OBJECT_ACTIVE,		/* object is usable */
-		FSCACHE_OBJECT_INVALIDATING,	/* object is invalidating */
-		FSCACHE_OBJECT_UPDATING,	/* object is updating */
-
-		/* terminal states */
-		FSCACHE_OBJECT_DYING,		/* object waiting for accessors to finish */
-		FSCACHE_OBJECT_LC_DYING,	/* object cleaning up after lookup/create */
-		FSCACHE_OBJECT_ABORT_INIT,	/* abort the init state */
-		FSCACHE_OBJECT_RELEASING,	/* releasing object */
-		FSCACHE_OBJECT_RECYCLING,	/* retiring object */
-		FSCACHE_OBJECT_WITHDRAWING,	/* withdrawing object */
-		FSCACHE_OBJECT_DEAD,		/* object is now dead */
-		FSCACHE_OBJECT__NSTATES
-	} state;
-
+	const struct fscache_state *state;	/* Object state machine state */
+	const struct fscache_transition *oob_table; /* OOB state transition table */
 	int			debug_id;	/* debugging ID */
 	int			n_children;	/* number of child objects */
 	int			n_ops;		/* number of extant ops on object */
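These two structs are the heart of change #7: each state names itself, carries a work function, and declares an event-driven transition table, so the worker thread simply runs the current state and looks up where to go next. A miniature, self-contained model of the same table-driven shape, with all states and names invented for illustration:

#include <stdio.h>

enum { ST_LOOK_UP, ST_AVAILABLE, ST_DROPPED, NR_STATES };
enum { EV_DONE, EV_KILL, NR_EVENTS };

struct transition {
    unsigned long events;   /* bitmask of accepted events */
    int transit_to;         /* index of the next state */
};

struct state {
    const char *name;
    void (*work)(void);     /* what the worker runs in this state */
    struct transition transitions[NR_EVENTS];
};

static void do_lookup(void)    { puts("looking up object"); }
static void do_available(void) { puts("object available"); }
static void do_dropped(void)   { puts("object dropped"); }

static const struct state states[NR_STATES] = {
    [ST_LOOK_UP]   = { "LOOK_UP", do_lookup,
                       { { 1UL << EV_DONE, ST_AVAILABLE },
                         { 1UL << EV_KILL, ST_DROPPED } } },
    [ST_AVAILABLE] = { "AVAILABLE", do_available,
                       { { 1UL << EV_KILL, ST_DROPPED } } },
    [ST_DROPPED]   = { "DROPPED", do_dropped, { { 0, 0 } } },
};

/* Dispatch one event by scanning the current state's table, the way the
 * fscache machine scans fscache_state::transitions. */
static int dispatch(int cur, int event)
{
    const struct state *s = &states[cur];
    for (int i = 0; i < NR_EVENTS; i++)
        if (s->transitions[i].events & (1UL << event)) {
            cur = s->transitions[i].transit_to;
            states[cur].work();
            return cur;
        }
    return cur;     /* event masked off in this state */
}

int main(void)
{
    int st = ST_LOOK_UP;

    states[st].work();
    st = dispatch(st, EV_DONE);
    st = dispatch(st, EV_KILL);
    return 0;
}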
@@ -390,6 +384,7 @@ struct fscache_object {
 	spinlock_t		lock;		/* state and operations lock */
 
 	unsigned long		lookup_jif;	/* time at which lookup started */
+	unsigned long		oob_event_mask;	/* OOB events this object is interested in */
 	unsigned long		event_mask;	/* events this object is interested in */
 	unsigned long		events;		/* events to be processed by this object
 						 * (order is important - using fls) */
@@ -398,6 +393,9 @@ struct fscache_object {
 #define FSCACHE_OBJECT_LOCK		0	/* T if object is busy being processed */
 #define FSCACHE_OBJECT_PENDING_WRITE	1	/* T if object has pending write */
 #define FSCACHE_OBJECT_WAITING		2	/* T if object is waiting on its parent */
+#define FSCACHE_OBJECT_IS_LIVE		3	/* T if object is not withdrawn or relinquished */
+#define FSCACHE_OBJECT_IS_LOOKED_UP	4	/* T if object has been looked up */
+#define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */
@@ -415,62 +413,40 @@ struct fscache_object {
 	loff_t			store_limit_l;	/* current storage limit */
 };
 
-extern const char *fscache_object_states[];
-
-#define fscache_object_is_active(obj)			      \
-	(!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&  \
-	 (obj)->state >= FSCACHE_OBJECT_AVAILABLE &&	      \
-	 (obj)->state < FSCACHE_OBJECT_DYING)
-
-#define fscache_object_is_dead(obj)				\
-	(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&	\
-	 (obj)->state >= FSCACHE_OBJECT_DYING)
-
 extern void fscache_object_work_func(struct work_struct *work);
 
-/**
- * fscache_object_init - Initialise a cache object description
- * @object: Object description
- *
- * Initialise a cache object description to its basic values.
- *
- * See Documentation/filesystems/caching/backend-api.txt for a complete
- * description.
- */
-static inline
-void fscache_object_init(struct fscache_object *object,
-			 struct fscache_cookie *cookie,
-			 struct fscache_cache *cache)
-{
-	atomic_inc(&cache->object_count);
-
-	object->state = FSCACHE_OBJECT_INIT;
-	spin_lock_init(&object->lock);
-	INIT_LIST_HEAD(&object->cache_link);
-	INIT_HLIST_NODE(&object->cookie_link);
-	INIT_WORK(&object->work, fscache_object_work_func);
-	INIT_LIST_HEAD(&object->dependents);
-	INIT_LIST_HEAD(&object->dep_link);
-	INIT_LIST_HEAD(&object->pending_ops);
-	object->n_children = 0;
-	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
-	object->events = object->event_mask = 0;
-	object->flags = 0;
-	object->store_limit = 0;
-	object->store_limit_l = 0;
-	object->cache = cache;
-	object->cookie = cookie;
-	object->parent = NULL;
-}
+extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
+				struct fscache_cache *);
+extern void fscache_object_destroy(struct fscache_object *);
 
 extern void fscache_object_lookup_negative(struct fscache_object *object);
 extern void fscache_obtained_object(struct fscache_object *object);
 
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
-extern void fscache_object_destroy(struct fscache_object *object);
-#else
-#define fscache_object_destroy(object) do {} while(0)
-#endif
+static inline bool fscache_object_is_live(struct fscache_object *object)
+{
+	return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+}
+
+static inline bool fscache_object_is_dying(struct fscache_object *object)
+{
+	return !fscache_object_is_live(object);
+}
+
+static inline bool fscache_object_is_available(struct fscache_object *object)
+{
+	return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
+}
+
+static inline bool fscache_object_is_active(struct fscache_object *object)
+{
+	return fscache_object_is_available(object) &&
+	       fscache_object_is_live(object) &&
+	       !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
+
+static inline bool fscache_object_is_dead(struct fscache_object *object)
+{
+	return fscache_object_is_dying(object) &&
+	       test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
 
 /**
  * fscache_object_destroyed - Note destruction of an object in a cache
@@ -531,6 +507,33 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
 	op->end_io_func(page, op->context, error);
 }
 
+/**
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @object: Object description
+ *
+ * Request usage of the cookie attached to an object.  NULL is returned if the
+ * relinquishment had reduced the cookie usage count to 0.
+ */
+static inline bool fscache_use_cookie(struct fscache_object *object)
+{
+	struct fscache_cookie *cookie = object->cookie;
+	return atomic_inc_not_zero(&cookie->n_active) != 0;
+}
+
+/**
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @object: Object description
+ *
+ * Cease usage of the cookie attached to an object.  When the users count
+ * reaches zero then the cookie relinquishment will be permitted to proceed.
+ */
+static inline void fscache_unuse_cookie(struct fscache_object *object)
+{
+	struct fscache_cookie *cookie = object->cookie;
+	if (atomic_dec_and_test(&cookie->n_active))
+		wake_up_atomic_t(&cookie->n_active);
+}
+
 /*
  * out-of-line cache backend functions
  */
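fscache_use_cookie() is the pinning half of the n_active scheme: atomic_inc_not_zero() refuses the pin once relinquishment has started the count toward zero. A sketch of the intended call pattern in a backend follows, in kernel style; the function shown is illustrative, not from the tree.

/* Sketch: pin the cookie before touching netfs data, and always unpin. */
static void example_touch_netfs(struct fscache_object *object)
{
    if (!fscache_use_cookie(object))
        return;             /* relinquishment already in progress */

    /* object->cookie->netfs_data and ->def are stable while pinned */

    fscache_unuse_cookie(object);   /* may wake __fscache_relinquish_cookie() */
}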
@@ -23,6 +23,7 @@ struct __wait_queue {
 struct wait_bit_key {
 	void			*flags;
 	int			bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR	-1
 };
 
 struct wait_bit_queue {
@@ -60,6 +61,9 @@ struct task_struct;
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
 	{ .flags = word, .bit_nr = bit, }
 
+#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
+	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
+
 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
 
 #define init_waitqueue_head(q)				\
@@ -146,8 +150,10 @@ void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
 void wake_up_bit(void *, int);
+void wake_up_atomic_t(atomic_t *);
 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
 wait_queue_head_t *bit_waitqueue(void *, int);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
@@ -902,5 +908,23 @@ static inline int wait_on_bit_lock(void *word, int bit,
 		return 0;
 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
 }
+
+/**
+ * wait_on_atomic_t - Wait for an atomic_t to become 0
+ * @val: The atomic value being waited on, a kernel virtual address
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
+ * the purpose of getting a waitqueue, but we set the key to a bit number
+ * outside of the target 'word'.
+ */
+static inline
+int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
+{
+	if (atomic_read(val) == 0)
+		return 0;
+	return out_of_line_wait_on_atomic_t(val, action, mode);
+}
 
 #endif
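The usual caller pattern for this API pairs a dec-and-test with a wait on one side and a wake on the other, exactly as the FS-Cache relinquish path does. Sketched in kernel style with illustrative names ('thing' and my_wait() are not from the tree):

static int my_wait(atomic_t *p)
{
    schedule();     /* a nonzero return here would abort the wait */
    return 0;
}

    /* Teardown side: drop our own use, then wait for the others. */
    if (!atomic_dec_and_test(&thing->n_users))
        wait_on_atomic_t(&thing->n_users, my_wait, TASK_UNINTERRUPTIBLE);

    /* Every other put path: the last one out wakes the waiter. */
    if (atomic_dec_and_test(&thing->n_users))
        wake_up_atomic_t(&thing->n_users);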
@@ -287,3 +287,91 @@ wait_queue_head_t *bit_waitqueue(void *word, int bit)
 	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
 }
 EXPORT_SYMBOL(bit_waitqueue);
+
+/*
+ * Manipulate the atomic_t address to produce a better bit waitqueue table hash
+ * index (we're keying off bit -1, but that would produce a horrible hash
+ * value).
+ */
+static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
+{
+	if (BITS_PER_LONG == 64) {
+		unsigned long q = (unsigned long)p;
+		return bit_waitqueue((void *)(q & ~1), q & 1);
+	}
+	return bit_waitqueue(p, 0);
+}
+
+static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
+				  void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue *wait_bit
+		= container_of(wait, struct wait_bit_queue, wait);
+	atomic_t *val = key->flags;
+
+	if (wait_bit->key.flags != key->flags ||
+	    wait_bit->key.bit_nr != key->bit_nr ||
+	    atomic_read(val) != 0)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
+ * the actions of __wait_on_atomic_t() are permitted return codes.  Nonzero
+ * return codes halt waiting and return.
+ */
+static __sched
+int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
+		       int (*action)(atomic_t *), unsigned mode)
+{
+	atomic_t *val;
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq, &q->wait, mode);
+		val = q->key.flags;
+		if (atomic_read(val) == 0)
+			ret = (*action)(val);
+	} while (!ret && atomic_read(val) != 0);
+	finish_wait(wq, &q->wait);
+	return ret;
+}
+
+#define DEFINE_WAIT_ATOMIC_T(name, p)					\
+	struct wait_bit_queue name = {					\
+		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
+		.wait = {						\
+			.private	= current,			\
+			.func		= wake_atomic_t_function,	\
+			.task_list	=				\
+				LIST_HEAD_INIT((name).wait.task_list),	\
+		},							\
+	}
+
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
+					 unsigned mode)
+{
+	wait_queue_head_t *wq = atomic_t_waitqueue(p);
+	DEFINE_WAIT_ATOMIC_T(wait, p);
+
+	return __wait_on_atomic_t(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
+
+/**
+ * wake_up_atomic_t - Wake up a waiter on a atomic_t
+ * @word: The word being waited on, a kernel virtual address
+ * @bit: The bit of the word being waited on
+ *
+ * Wake up anyone waiting for the atomic_t to go to zero.
+ *
+ * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
+ * check is done by the waiter's wake function, not the by the waker itself).
+ */
+void wake_up_atomic_t(atomic_t *p)
+{
+	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
+}
+EXPORT_SYMBOL(wake_up_atomic_t);
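atomic_t_waitqueue() hashes the counter's address into the shared bit-wait table rather than embedding a waitqueue in every counter. A userspace analogue of that trick hashes an address into a fixed pool of condition-variable slots; collisions are harmless because each waiter re-checks its own predicate after waking. Illustrative only:

#include <pthread.h>
#include <stdint.h>

#define POOL_SIZE 64    /* power of two, like the kernel's hash tables */

struct waiter_slot {
    pthread_mutex_t lock;
    pthread_cond_t cond;
};

static struct waiter_slot pool[POOL_SIZE];
static pthread_once_t pool_once = PTHREAD_ONCE_INIT;

static void pool_init(void)
{
    for (int i = 0; i < POOL_SIZE; i++) {
        pthread_mutex_init(&pool[i].lock, NULL);
        pthread_cond_init(&pool[i].cond, NULL);
    }
}

/* Hash an address to a slot, as atomic_t_waitqueue() hashes into the
 * kernel's bit-wait table.  Distinct addresses may share a slot. */
static struct waiter_slot *slot_for(const void *addr)
{
    uintptr_t v = (uintptr_t)addr;

    pthread_once(&pool_once, pool_init);
    return &pool[(v >> 4) & (POOL_SIZE - 1)];   /* drop alignment bits */
}

static void wake_address(const void *addr)
{
    struct waiter_slot *s = slot_for(addr);

    pthread_mutex_lock(&s->lock);
    pthread_cond_broadcast(&s->cond);   /* waiters on other addresses that
                                         * share this slot just re-check
                                         * their predicate and sleep again */
    pthread_mutex_unlock(&s->lock);
}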