mirror of https://github.com/raspberrypi/linux.git
io_uring/net: commit partial buffers on retry
commit 41b70df5b3 upstream.

Ring provided buffers are potentially only valid within the single
execution context in which they were acquired. io_uring deals with this
and invalidates them on retry. But on the networking side, if
MSG_WAITALL is set, or if the socket is of the streaming type and too
little was processed, then it will hang on to the buffer rather than
recycle or commit it. This is problematic for two reasons:

1) If someone unregisters the provided buffer ring before a later retry,
   then the req->buf_list will no longer be valid.

2) If multiple sockets are using the same buffer group, then multiple
   receives can consume the same memory. This can cause data corruption
   in the application, as either receive could land in the same
   userspace buffer.

Fix this by disallowing partial retries from pinning a provided buffer
across multiple executions, if ring provided buffers are used.

Cc: stable@vger.kernel.org
Reported-by: pt x <superman.xpt@gmail.com>
Fixes: c56e022c0a ("io_uring: add support for user mapped provided buffer ring")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committed by Greg Kroah-Hartman
parent a69a9b53c5
commit 2eb7937b5f
@@ -477,6 +477,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 	return nbufs;
 }
 
+static int io_net_kbuf_recyle(struct io_kiocb *req,
+			      struct io_async_msghdr *kmsg, int len)
+{
+	req->flags |= REQ_F_BL_NO_RECYCLE;
+	if (req->flags & REQ_F_BUFFERS_COMMIT)
+		io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
+	return IOU_RETRY;
+}
+
 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 				  struct io_async_msghdr *kmsg,
 				  unsigned issue_flags)
@@ -545,8 +554,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -657,8 +665,7 @@ retry_bundle:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1026,8 +1033,7 @@ retry_multishot:
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return IOU_RETRY;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1168,8 +1174,7 @@ retry_multishot:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1450,8 +1455,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1521,8 +1525,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_net_kbuf_recyle(req, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
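Not part of the commit, but to make the sharing scenario from the commit message concrete: the sketch below is a minimal userspace illustration of two sockets receiving out of the same provided buffer group. The function and constant names (queue_recv, submit_shared_group_recvs, sock_a, sock_b, BGID, NR_BUFS, BUF_SIZE) are made up for the example; it assumes liburing >= 2.4, an already initialized struct io_uring, two connected socket fds, and elides error handling. If a short receive on one socket pinned its buffer across a retry, a receive on the other socket could be served from the same memory, which is the corruption the new helper avoids by committing the partially used buffer instead.

/*
 * Hedged illustration only, not kernel code: two receives selecting
 * buffers from one shared provided-buffer group (BGID).
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID     7
#define NR_BUFS  8
#define BUF_SIZE 4096

static void queue_recv(struct io_uring *ring, int fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        /* no buffer passed here: the kernel picks one from group BGID */
        io_uring_prep_recv(sqe, fd, NULL, BUF_SIZE, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
}

static int submit_shared_group_recvs(struct io_uring *ring, int sock_a, int sock_b)
{
        struct io_uring_buf_ring *br;
        int i, err;

        /* one ring of provided buffers, shared by both receives below */
        br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
        if (!br)
                return err;

        for (i = 0; i < NR_BUFS; i++)
                io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
                                      io_uring_buf_ring_mask(NR_BUFS), i);
        io_uring_buf_ring_advance(br, NR_BUFS);

        queue_recv(ring, sock_a);
        queue_recv(ring, sock_b);
        return io_uring_submit(ring);
}

With the fix applied, a partial streaming receive on either socket commits the consumed part of its selected buffer and retries with a fresh pick, rather than holding the old buffer across executions.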