From 1af7e55511fe194a07f3fa4e77b5b9d10bdd9208 Mon Sep 17 00:00:00 2001
From: Arseny Krasnov
Date: Fri, 3 Sep 2021 15:32:35 +0300
Subject: vhost/vsock: support MSG_EOR bit processing

'MSG_EOR' handling follows the same logic as 'MSG_EOM': if the bit is
present in the packet's header, reset it to 0, then restore it if
packet processing wasn't completed. Instead of a bool variable for
each flag, a bit mask variable was added: it holds the logical OR of
'MSG_EOR' and 'MSG_EOM' as needed, and to restore the flags this
variable is ORed into the packet's flags field.

Signed-off-by: Arseny Krasnov
Link: https://lore.kernel.org/r/20210903123238.3273526-1-arseny.krasnov@kaspersky.com
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Stefano Garzarella
---
 drivers/vhost/vsock.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index feaf650affbe..938aefbc75ec 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		size_t nbytes;
 		size_t iov_len, payload_len;
 		int head;
-		bool restore_flag = false;
+		u32 flags_to_restore = 0;
 
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
@@ -179,15 +179,20 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			 * created dynamically and are initialized with header
 			 * of current packet(except length). But in case of
 			 * SOCK_SEQPACKET, we also must clear message delimeter
-			 * bit(VIRTIO_VSOCK_SEQ_EOM). Otherwise, instead of one
-			 * packet with delimeter(which marks end of message),
-			 * there will be sequence of packets with delimeter
-			 * bit set. After initialized header will be copied to
-			 * rx buffer, this bit will be restored.
+			 * bit (VIRTIO_VSOCK_SEQ_EOM) and MSG_EOR bit
+			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise,
+			 * there will be sequence of packets with these
+			 * bits set. After initialized header will be copied to
+			 * rx buffer, these required bits will be restored.
 			 */
 			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
 				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
-				restore_flag = true;
+				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+				}
 			}
 		}
 
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		 * to send it with the next available buffer.
 		 */
 		if (pkt->off < pkt->len) {
-			if (restore_flag)
-				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
 
 			/* We are queueing the same virtio_vsock_pkt to handle
 			 * the remaining bytes, and we want to deliver it
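
For illustration only, here is a minimal, self-contained userspace C sketch of the
save/clear/restore pattern the patch switches to: a single bit mask accumulates
whichever delimiter bits were cleared, so all of them can be put back with one OR
instead of keeping one bool per flag. The names SEQ_EOM, SEQ_EOR, hdr_flags and
flags_to_restore below are hypothetical stand-ins; the real driver operates on
pkt->hdr.flags with le32_to_cpu()/cpu_to_le32() conversions as shown in the diff
above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for VIRTIO_VSOCK_SEQ_EOM / VIRTIO_VSOCK_SEQ_EOR. */
#define SEQ_EOM (1u << 0)
#define SEQ_EOR (1u << 1)

int main(void)
{
	uint32_t hdr_flags = SEQ_EOM | SEQ_EOR;	/* header of the packet being split */
	uint32_t flags_to_restore = 0;		/* one mask replaces per-flag bools */

	/* Clear the delimiter bits before copying a partial packet,
	 * remembering exactly which bits were cleared.
	 */
	if (hdr_flags & SEQ_EOM) {
		hdr_flags &= ~SEQ_EOM;
		flags_to_restore |= SEQ_EOM;

		if (hdr_flags & SEQ_EOR) {
			hdr_flags &= ~SEQ_EOR;
			flags_to_restore |= SEQ_EOR;
		}
	}
	printf("while splitting: flags=0x%x\n", hdr_flags);

	/* Packet not fully sent: put every cleared bit back in a single OR. */
	hdr_flags |= flags_to_restore;
	printf("after restore:   flags=0x%x\n", hdr_flags);

	return 0;
}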