forked from Minki/linux
Merge branch 'Introduce-sendpage_ok-to-detect-misused-sendpage-in-network-related-drivers'
Coly Li says: ==================== Introduce sendpage_ok() to detect misused sendpage in network related drivers As Sagi Grimberg suggested, the original fix is refined into a more common inline routine: static inline bool sendpage_ok(struct page *page) { return (!PageSlab(page) && page_count(page) >= 1); } If sendpage_ok() returns true, the page being checked can be handled by the concrete zero-copy sendpage method in the network layer. The v10 series has 7 patches; it fixes a WARN_ONCE() usage from the v9 series. - The 1st patch in this series introduces sendpage_ok() in header file include/linux/net.h. - The 2nd patch adds WARN_ONCE() for improper zero-copy send in kernel_sendpage(). - The 3rd patch fixes the page checking issue in the nvme-over-tcp driver. - The 4th patch adds a page_count check by using sendpage_ok() in do_tcp_sendpages(), as Eric Dumazet suggested. - The 5th and 6th patches just replace existing open-coded checks with the inline sendpage_ok() routine. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
e7d4005d48
@ -1553,7 +1553,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
|
||||
* put_page(); and would cause either a VM_BUG directly, or
|
||||
* __page_cache_release a page that would actually still be referenced
|
||||
* by someone, leading to some obscure delayed Oops somewhere else. */
|
||||
if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
|
||||
if (drbd_disable_sendpage || !sendpage_ok(page))
|
||||
return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
|
||||
|
||||
msg_flags |= MSG_NOSIGNAL;
|
||||
|
@ -913,12 +913,11 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
|
||||
else
|
||||
flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
|
||||
|
||||
/* can't zcopy slab pages */
|
||||
if (unlikely(PageSlab(page))) {
|
||||
ret = sock_no_sendpage(queue->sock, page, offset, len,
|
||||
if (sendpage_ok(page)) {
|
||||
ret = kernel_sendpage(queue->sock, page, offset, len,
|
||||
flags);
|
||||
} else {
|
||||
ret = kernel_sendpage(queue->sock, page, offset, len,
|
||||
ret = sock_no_sendpage(queue->sock, page, offset, len,
|
||||
flags);
|
||||
}
|
||||
if (ret <= 0)
|
||||
|
@ -128,7 +128,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
|
||||
* coalescing neighboring slab objects into a single frag which
|
||||
* triggers one of hardened usercopy checks.
|
||||
*/
|
||||
if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
|
||||
if (!recv && sendpage_ok(sg_page(sg)))
|
||||
return;
|
||||
|
||||
if (recv) {
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/once.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sockptr.h>
|
||||
|
||||
#include <uapi/linux/net.h>
|
||||
@ -286,6 +287,21 @@ do { \
|
||||
#define net_get_random_once_wait(buf, nbytes) \
|
||||
get_random_once_wait((buf), (nbytes))
|
||||
|
||||
/*
|
||||
* E.g. XFS meta- & log-data is in slab pages, or bcache meta
|
||||
* data pages, or other high order pages allocated by
|
||||
* __get_free_pages() without __GFP_COMP, which have a page_count
|
||||
* of 0 and/or have PageSlab() set. We cannot use send_page for
|
||||
* those, as that does get_page(); put_page(); and would cause
|
||||
* either a VM_BUG directly, or __page_cache_release a page that
|
||||
* would actually still be referenced by someone, leading to some
|
||||
* obscure delayed Oops somewhere else.
|
||||
*/
|
||||
static inline bool sendpage_ok(struct page *page)
|
||||
{
|
||||
return !PageSlab(page) && page_count(page) >= 1;
|
||||
}
|
||||
|
||||
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
|
||||
size_t num, size_t len);
|
||||
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
|
||||
|
@ -575,7 +575,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
|
||||
* coalescing neighboring slab objects into a single frag which
|
||||
* triggers one of hardened usercopy checks.
|
||||
*/
|
||||
if (page_count(page) >= 1 && !PageSlab(page))
|
||||
if (sendpage_ok(page))
|
||||
sendpage = sock->ops->sendpage;
|
||||
else
|
||||
sendpage = sock_no_sendpage;
|
||||
|
@ -970,7 +970,8 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
|
||||
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
|
||||
|
||||
if (IS_ENABLED(CONFIG_DEBUG_VM) &&
|
||||
WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
|
||||
WARN_ONCE(!sendpage_ok(page),
|
||||
"page must not be a Slab one and have page_count > 0"))
|
||||
return -EINVAL;
|
||||
|
||||
/* Wait for a connection to finish. One exception is TCP Fast Open
|
||||
|
@ -3638,9 +3638,11 @@ EXPORT_SYMBOL(kernel_getpeername);
|
||||
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
|
||||
size_t size, int flags)
|
||||
{
|
||||
if (sock->ops->sendpage)
|
||||
if (sock->ops->sendpage) {
|
||||
/* Warn in case the improper page to zero-copy send */
|
||||
WARN_ONCE(!sendpage_ok(page), "improper page for zero-copy send");
|
||||
return sock->ops->sendpage(sock, page, offset, size, flags);
|
||||
|
||||
}
|
||||
return sock_no_sendpage(sock, page, offset, size, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(kernel_sendpage);
|
||||
|
Loading…
Reference in New Issue
Block a user