xen: XSA-432 security patches for v6.5

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZMtRjQAKCRCAXGG7T9hj
 vmd6AQCv9TM0Ln9n+3VNnI1RkJcY/OxRafM0LT+Hn4ZQopvmrAD/b3oWQ7TMy8On
 FtIl/E/hczpd7xC046ArFBBJdTtwHAE=
 =Ti7r
 -----END PGP SIGNATURE-----

Merge tag 'xsa432-6.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen netback buffer overflow fix from Juergen Gross:
 "The fix for XSA-423 added logic to Linux'es netback driver to deal
  with a frontend splitting a packet in a way such that not all of the
  headers would come in one piece.

  Unfortunately the logic introduced there didn't account for the
  extreme case of the entire packet being split into as many pieces as
  permitted by the protocol, yet still being smaller than the area
  that's specially dealt with to keep all (possible) headers together.

  Such an unusual packet would therefore trigger a buffer overrun in the
  driver"

* tag 'xsa432-6.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netback: Fix buffer overrun triggered by unusual packet
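
To make the failure mode concrete, here is a minimal, self-contained sketch of the arithmetic involved; it is not the driver code itself. The slot counts are illustrative assumptions (a head plus 18 slots from the frontend, 17 frag slots per skb, and a packet small enough to be consumed entirely by the header-copy phase). The point is only that the pre-fix slot count, being unsigned, wraps when the copy phase consumes more slots than were counted, while the fixed accounting and loop guard stay bounded.

/* Standalone illustration only, not kernel code.  The constants are
 * assumptions chosen to match the scenario described above. */
#include <stdio.h>

#define MAX_SKB_FRAGS 17u	/* illustrative stand-in for the kernel value */

int main(void)
{
	unsigned int guest_slots   = 1 + 18;	/* head + 18 slots from the frontend */
	unsigned int skb_frags     = MAX_SKB_FRAGS;	/* frags kept in the main skb */
	unsigned int frag_overflow = guest_slots - 1 - skb_frags;	/* spill into nskb */

	/* Pre-fix accounting: frag_overflow is not counted ... */
	unsigned int nr_slots = skb_frags + 1;
	/* ... yet the copy phase decrements nr_slots once per consumed slot,
	 * and a tiny packet lets it consume every one of the guest's slots. */
	nr_slots -= guest_slots;	/* 18 - 19: wraps to UINT_MAX */
	printf("buggy nr_slots after copy phase: %u (loop bound -> overrun)\n",
	       nr_slots);

	/* Post-fix accounting counts every slot and bounds the loop explicitly. */
	nr_slots = skb_frags + frag_overflow + 1;
	nr_slots -= guest_slots;	/* 19 - 19 == 0 */

	unsigned int mapped = 0;
	for (unsigned int nr_frags = 0;
	     nr_slots > 0 && nr_frags < MAX_SKB_FRAGS;	/* the fixed guard */
	     nr_frags++, nr_slots--)
		mapped++;
	printf("fixed nr_slots after copy phase: 0, frag mappings attempted: %u\n",
	       mapped);

	return 0;
}

With the old bound, the mapping loop would have attempted on the order of UINT_MAX iterations, walking far past the frag and pending-ring bookkeeping it indexes; with the fix it performs none here, because every slot was already handled as a copy.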
commit da703fe941
Author: Linus Torvalds
Date:   2023-08-07 17:25:19 -07:00

@@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags + 1;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
 	copy_count(skb) = 0;
 	XENVIF_TX_CB(skb)->split_mask = 0;
@@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 	}
 
-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, gop++) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
@@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			txp++;
 	}
 
-	if (frag_overflow) {
+	if (nr_slots > 0) {
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
@@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}
 
 	(*copy_ops) = cop - queue->tx_copy_ops;