mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 06:01:57 +00:00
netfs: Use bh-disabling spinlocks for rreq->lock
Use bh-disabling spinlocks when accessing rreq->lock because, in the future, it may be twiddled from softirq context when cleanup is driven from cache backend DIO completion.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-12-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent
24c90a79f6
commit
22de489d1e
@@ -473,7 +473,7 @@ reassess_streams:
 cancel:
 	/* Remove if completely consumed. */
-	spin_lock(&wreq->lock);
+	spin_lock_bh(&wreq->lock);

 	remove = front;
 	list_del_init(&front->rreq_link);
@@ -489,7 +489,7 @@ reassess_streams:
 		}
 	}

-	spin_unlock(&wreq->lock);
+	spin_unlock_bh(&wreq->lock);
 	netfs_put_subrequest(remove, false,
 			     notes & SAW_FAILURE ?
 			     netfs_sreq_trace_put_cancel :
@@ -191,7 +191,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
	 * the list. The collector only goes nextwards and uses the lock to
	 * remove entries off of the front.
	 */
-	spin_lock(&wreq->lock);
+	spin_lock_bh(&wreq->lock);
 	list_add_tail(&subreq->rreq_link, &stream->subrequests);
 	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
 		stream->front = subreq;
@@ -202,7 +202,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 		}
 	}

-	spin_unlock(&wreq->lock);
+	spin_unlock_bh(&wreq->lock);

 	stream->construct = subreq;
 }
|
Loading…
Reference in New Issue
Block a user