vhost/vsock: support MSG_EOR bit processing
'MSG_EOR' handling follows the same logic as 'MSG_EOM': if the bit is present in the packet's header, it is cleared, then restored if packet processing was not completed. Instead of a bool variable for each flag, a bit-mask variable is added: it accumulates the logical OR of 'MSG_EOR' and 'MSG_EOM' as needed, and to restore the flags the mask is ORed back into the packet's flags field.

Signed-off-by: Arseny Krasnov <arseny.krasnov@kaspersky.com>
Link: https://lore.kernel.org/r/20210903123238.3273526-1-arseny.krasnov@kaspersky.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
parent 41116599a0
commit 1af7e55511
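The diff below boils down to the clear-and-restore pattern described in the message: delimiter bits are stripped from the header before it is copied to the guest's rx buffer, remembered in a mask, and ORed back in if the packet still has bytes left to send. Here is a minimal user-space sketch of that pattern, not the kernel code itself: the struct pkt type, the SEQ_EOM/SEQ_EOR constants and the clear_seq_bits() helper are stand-ins invented for illustration, replacing struct virtio_vsock_pkt, the VIRTIO_VSOCK_SEQ_* flags and the le32 header accessors used in vhost_transport_do_send_pkt().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for VIRTIO_VSOCK_SEQ_EOM / VIRTIO_VSOCK_SEQ_EOR. */
#define SEQ_EOM (1u << 0)
#define SEQ_EOR (1u << 1)

/* Simplified stand-in for struct virtio_vsock_pkt. */
struct pkt {
	uint32_t flags;	/* header flags, host byte order here for simplicity */
	size_t off;	/* bytes already sent */
	size_t len;	/* total payload length */
};

/* Clear the delimiter bits before the header is copied out, and return
 * a mask of whatever was cleared so it can be restored later.
 */
static uint32_t clear_seq_bits(struct pkt *pkt)
{
	uint32_t flags_to_restore = 0;

	if (pkt->flags & SEQ_EOM) {
		pkt->flags &= ~SEQ_EOM;
		flags_to_restore |= SEQ_EOM;

		/* EOR only matters on the message-ending packet, so it is
		 * checked (and cleared) inside the EOM branch, as in the diff.
		 */
		if (pkt->flags & SEQ_EOR) {
			pkt->flags &= ~SEQ_EOR;
			flags_to_restore |= SEQ_EOR;
		}
	}

	return flags_to_restore;
}

int main(void)
{
	struct pkt pkt = { .flags = SEQ_EOM | SEQ_EOR, .off = 0, .len = 100 };
	uint32_t flags_to_restore = clear_seq_bits(&pkt);

	/* ...header with the bits cleared is copied to the rx buffer... */
	pkt.off = 60;	/* pretend only part of the payload fit in the buffer */

	/* Packet not fully processed: restore every cleared bit with one OR. */
	if (pkt.off < pkt.len)
		pkt.flags |= flags_to_restore;

	printf("flags after partial send: 0x%x\n", (unsigned int)pkt.flags);
	return 0;
}

Keeping one mask instead of a bool per flag means the restore path stays a single OR no matter how many bits are involved, which is exactly the simplification the commit makes.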
drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		size_t nbytes;
 		size_t iov_len, payload_len;
 		int head;
-		bool restore_flag = false;
+		u32 flags_to_restore = 0;
 
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
@@ -179,15 +179,20 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			 * created dynamically and are initialized with header
 			 * of current packet(except length). But in case of
 			 * SOCK_SEQPACKET, we also must clear message delimeter
-			 * bit(VIRTIO_VSOCK_SEQ_EOM). Otherwise, instead of one
-			 * packet with delimeter(which marks end of message),
-			 * there will be sequence of packets with delimeter
-			 * bit set. After initialized header will be copied to
-			 * rx buffer, this bit will be restored.
+			 * bit (VIRTIO_VSOCK_SEQ_EOM) and MSG_EOR bit
+			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise,
+			 * there will be sequence of packets with these
+			 * bits set. After initialized header will be copied to
+			 * rx buffer, these required bits will be restored.
 			 */
 			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
 				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
-				restore_flag = true;
+				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+				}
 			}
 		}
 
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		 * to send it with the next available buffer.
 		 */
 		if (pkt->off < pkt->len) {
-			if (restore_flag)
-				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
 
 			/* We are queueing the same virtio_vsock_pkt to handle
 			 * the remaining bytes, and we want to deliver it