Mirror of https://github.com/torvalds/linux.git
NFS: Replace NFS_I(inode)->req_lock with inode->i_lock
There is no justification for keeping a special spinlock for the exclusive use of the NFS writeback code.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 4e56e082dd
commit 587142f85f
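What follows is the full diff. Every site that used to take the dedicated nfsi->req_lock now takes the pre-existing inode->i_lock from struct inode, so the lock protecting the per-inode nfs_page_tree radix tree is shared with the VFS rather than duplicated. A minimal sketch of the before and after pattern; the function name here is illustrative only and is not part of the commit:

	static void nfs_example_tree_op(struct inode *inode)
	{
		struct nfs_inode *nfsi = NFS_I(inode);

		/* Before: the writeback code took its own lock:
		 *	spin_lock(&nfsi->req_lock);
		 * After: the same data is guarded by the VFS inode lock. */
		spin_lock(&inode->i_lock);
		/* ... operate on nfsi->nfs_page_tree and nfsi->npages ... */
		spin_unlock(&inode->i_lock);
	}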
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1154,7 +1154,6 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
 	inode_init_once(&nfsi->vfs_inode);
-	spin_lock_init(&nfsi->req_lock);
 	INIT_LIST_HEAD(&nfsi->open_files);
 	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -126,12 +126,13 @@ static int nfs_set_page_tag_locked(struct nfs_page *req)
  */
 void nfs_clear_page_tag_locked(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
 
 	if (req->wb_page != NULL) {
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	}
 	nfs_unlock_request(req);
 }
@@ -390,7 +391,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
  * If the number of requests is set to 0, the entire address_space
  * starting at index idx_start, is scanned.
  * The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * You must be holding the inode's i_lock when calling this function
  */
 int nfs_scan_list(struct nfs_inode *nfsi,
 		struct list_head *dst, pgoff_t idx_start,
@@ -430,7 +431,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
 			}
 		}
 		/* for latency reduction */
-		cond_resched_lock(&nfsi->req_lock);
+		cond_resched_lock(&nfsi->vfs_inode.i_lock);
 	}
 out:
 	return res;
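Note the calling convention fixed by the two hunks above: nfs_scan_list() must be entered with the inode's i_lock already held, and cond_resched_lock() inside the scan loop will briefly drop and retake that same lock when a reschedule is pending, so the comment, the caller, and the resched site all have to agree on which lock it is. A hedged sketch of the caller side, modelled on the nfs_commit_inode() hunk later in this diff (the declarations of res and head are assumed):

	spin_lock(&inode->i_lock);	/* the nfs_scan_* helpers expect i_lock held */
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);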
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -124,12 +124,12 @@ static struct nfs_page *nfs_page_find_request_locked(struct page *page)
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
+	struct inode *inode = page->mapping->host;
 	struct nfs_page *req = NULL;
-	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return req;
 }
 
@@ -251,16 +251,16 @@ static void nfs_end_page_writeback(struct page *page)
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req;
-	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-	spinlock_t *req_lock = &nfsi->req_lock;
 	int ret;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	for(;;) {
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
-			spin_unlock(req_lock);
+			spin_unlock(&inode->i_lock);
 			return 1;
 		}
 		if (nfs_lock_request_dontget(req))
@@ -270,28 +270,28 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		 * succeed provided that someone hasn't already marked the
 		 * request as dirty (in which case we don't care).
 		 */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret != 0)
 			return ret;
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 	}
 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		/* This request is marked for commit */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
 		return 1;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		BUG();
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 			NFS_PAGE_TAG_LOCKED);
 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
 	return ret;
 }
@@ -412,7 +412,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
@@ -420,11 +420,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_end_data_update(inode);
 		iput(inode);
 	} else
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	nfs_clear_request(req);
 	nfs_release_request(req);
 }
@@ -458,13 +458,13 @@ nfs_mark_request_commit(struct nfs_page *req)
 	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	nfsi->ncommit++;
 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
@@ -534,10 +534,10 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u
 		BUG_ON(!NFS_WBACK_BUSY(req));
 
 		kref_get(&req->wb_kref);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		error = nfs_wait_on_request(req);
 		nfs_release_request(req);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		if (error < 0)
 			return error;
 		res++;
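The nfs_wait_on_requests_locked() hunk above preserves a classic idiom while swapping locks: pin the request with a reference, drop the spinlock before sleeping, then retake it so the scan can continue. A sketch of that pattern in isolation; the wrapper name is mine, not the commit's:

	/* Called with inode->i_lock held; returns with it held again. */
	static int nfs_example_wait_for_request(struct inode *inode, struct nfs_page *req)
	{
		int error;

		kref_get(&req->wb_kref);		/* pin req so it cannot vanish */
		spin_unlock(&inode->i_lock);		/* never sleep under a spinlock */
		error = nfs_wait_on_request(req);	/* may sleep */
		nfs_release_request(req);
		spin_lock(&inode->i_lock);		/* retake before resuming the scan */
		return error;
	}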
@@ -602,7 +602,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page	*req, *new = NULL;
 	pgoff_t		rqend, end;
 
@@ -612,13 +611,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		/* Loop over all inode entries and see if we find
 		 * A request for the page we wish to update
 		 */
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		req = nfs_page_find_request_locked(page);
 		if (req) {
 			if (!nfs_lock_request_dontget(req)) {
 				int error;
 
-				spin_unlock(&nfsi->req_lock);
+				spin_unlock(&inode->i_lock);
 				error = nfs_wait_on_request(req);
 				nfs_release_request(req);
 				if (error < 0) {
@@ -628,7 +627,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 				}
 				continue;
 			}
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			if (new)
 				nfs_release_request(new);
 			break;
@@ -639,14 +638,14 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 			nfs_lock_request_dontget(new);
 			error = nfs_inode_add_request(inode, new);
 			if (error) {
-				spin_unlock(&nfsi->req_lock);
+				spin_unlock(&inode->i_lock);
 				nfs_unlock_request(new);
 				return ERR_PTR(error);
 			}
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			return new;
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 
 		new = nfs_create_request(ctx, inode, page, offset, bytes);
 		if (IS_ERR(new))
@@ -974,9 +973,9 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 	}
 
 	if (nfs_write_need_commit(data)) {
-		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+		struct inode *inode = page->mapping->host;
 
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
 			/* Do nothing we need to resend the writes */
 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
@@ -987,7 +986,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
 			dprintk(" server reboot detected\n");
 		}
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 	} else
 		dprintk(" OK\n");
 
@@ -1277,13 +1276,12 @@ static const struct rpc_call_ops nfs_commit_ops = {
 
 int nfs_commit_inode(struct inode *inode, int how)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
 	int res;
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	if (res) {
 		int error = nfs_commit_list(inode, &head, how);
 		if (error < 0)
@@ -1301,7 +1299,6 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i
 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	pgoff_t idx_start, idx_end;
 	unsigned int npages = 0;
 	LIST_HEAD(head);
@@ -1323,7 +1320,7 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		}
 	}
 	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	do {
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
@@ -1334,18 +1331,19 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		if (pages == 0)
 			break;
 		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_cancel_commit_list(&head);
 			ret = pages;
-			spin_lock(&nfsi->req_lock);
+			spin_lock(&inode->i_lock);
 			continue;
 		}
 		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
+
 	} while (ret >= 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
 
@@ -1439,7 +1437,6 @@ int nfs_set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode;
-	spinlock_t *req_lock;
 	struct nfs_page *req;
 	int ret;
 
@@ -1448,18 +1445,17 @@ int nfs_set_page_dirty(struct page *page)
 	inode = mapping->host;
 	if (!inode)
 		goto out_raced;
-	req_lock = &NFS_I(inode)->req_lock;
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_release_request(req);
 		return ret;
 	}
	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 out_raced:
 	return !TestSetPageDirty(page);
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -156,7 +156,6 @@ struct nfs_inode {
 	/*
 	 * This is the list of dirty unwritten pages.
 	 */
-	spinlock_t		req_lock;
 	struct radix_tree_root	nfs_page_tree;
 
 	unsigned long		ncommit,