unix: handle CQ overflow in iou ring (#3991)
When there are more than 128 concurrent CQ completions, the CQ ring overflows, which the kernel signals via the `UV__IORING_SQ_CQ_OVERFLOW` flag. When that happens we have to enter the kernel to retrieve the remaining items.
This commit is contained in: parent 6ad347fae4, commit 30fc896cc1
src/unix/linux.c:

@@ -159,6 +159,7 @@ enum {
 
 enum {
   UV__IORING_SQ_NEED_WAKEUP = 1u,
+  UV__IORING_SQ_CQ_OVERFLOW = 2u,
 };
 
 struct uv__io_cqring_offsets {
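These two bits mirror the SQ-ring flag bits the kernel publishes in the shared ring memory. For reference (not part of this patch), the corresponding definitions in the kernel's <linux/io_uring.h> are:

#define IORING_SQ_NEED_WAKEUP  (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW  (1U << 1) /* CQ ring is overflown */

libuv defines its own UV__-prefixed copies so the build does not depend on recent kernel headers.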
@@ -891,7 +892,9 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
   uint32_t tail;
   uint32_t mask;
   uint32_t i;
+  uint32_t flags;
   int nevents;
+  int rc;
 
   head = *iou->cqhead;
   tail = atomic_load_explicit((_Atomic uint32_t*) iou->cqtail,
@@ -931,6 +934,21 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
                         tail,
                         memory_order_release);
 
+  /* Check whether CQE's overflowed, if so enter the kernel to make them
+   * available. Don't grab them immediately but in the next loop iteration to
+   * avoid loop starvation. */
+  flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags,
+                               memory_order_acquire);
+
+  if (flags & UV__IORING_SQ_CQ_OVERFLOW) {
+    do
+      rc = uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_GETEVENTS);
+    while (rc == -1 && errno == EINTR);
+
+    if (rc < 0)
+      perror("libuv: io_uring_enter(getevents)");  /* Can't happen. */
+  }
+
   uv__metrics_inc_events(loop, nevents);
   if (uv__get_internal_fields(loop)->current_timeout == 0)
     uv__metrics_inc_events_waiting(loop, nevents);
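Outside the patch, the recovery step amounts to a single io_uring_enter(2) call with IORING_ENTER_GETEVENTS and nothing to submit: once the application has drained the CQ ring, the kernel moves CQEs from its internal overflow list back into the ring. A minimal standalone sketch using the raw syscall (flush_overflowed_cqes is a hypothetical helper, not libuv API):

#include <errno.h>
#include <linux/io_uring.h>   /* IORING_ENTER_GETEVENTS */
#include <sys/syscall.h>      /* SYS_io_uring_enter */
#include <unistd.h>

/* Hypothetical helper, not libuv API: ask the kernel to move CQEs from
 * its internal overflow list back into the (now drained) CQ ring.
 * to_submit == 0 and min_complete == 0 mean we neither submit new SQEs
 * nor block; the IORING_ENTER_GETEVENTS flag alone triggers the flush. */
static int flush_overflowed_cqes(int ringfd) {
  int rc;

  do
    rc = syscall(SYS_io_uring_enter, ringfd, 0, 0,
                 IORING_ENTER_GETEVENTS, NULL, 0);
  while (rc == -1 && errno == EINTR);

  return rc;
}

Note the deliberate ordering in the patch: the flag is checked after publishing the new CQ head, and the overflowed entries are picked up on the next loop iteration rather than immediately, so one busy ring cannot starve the rest of the event loop.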
test/test-fs.c:

@@ -662,6 +662,15 @@ static void stat_cb(uv_fs_t* req) {
   ASSERT(!req->ptr);
 }
 
+static void stat_batch_cb(uv_fs_t* req) {
+  ASSERT(req->fs_type == UV_FS_STAT || req->fs_type == UV_FS_LSTAT);
+  ASSERT(req->result == 0);
+  ASSERT(req->ptr);
+  stat_cb_count++;
+  uv_fs_req_cleanup(req);
+  ASSERT(!req->ptr);
+}
+
 
 static void sendfile_cb(uv_fs_t* req) {
   ASSERT(req == &sendfile_req);
@@ -4540,3 +4549,27 @@ TEST_IMPL(fs_get_system_error) {
 
   return 0;
 }
+
+TEST_IMPL(fs_stat_batch_multiple) {
+  uv_fs_t req[300];
+  int r;
+  int i;
+
+  rmdir("test_dir");
+
+  r = uv_fs_mkdir(NULL, &mkdir_req, "test_dir", 0755, NULL);
+  ASSERT_EQ(r, 0);
+
+  loop = uv_default_loop();
+
+  for (i = 0; i < (int) ARRAY_SIZE(req); ++i) {
+    r = uv_fs_stat(loop, &req[i], "test_dir", stat_batch_cb);
+    ASSERT_EQ(r, 0);
+  }
+
+  uv_run(loop, UV_RUN_DEFAULT);
+  ASSERT_EQ(stat_cb_count, ARRAY_SIZE(req));
+
+  MAKE_VALGRIND_HAPPY(loop);
+  return 0;
+}
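The test drives 300 concurrent uv_fs_stat() requests at a single directory, well past the 128 in-flight completions the commit message cites as the overflow threshold, so the run exercises the new UV__IORING_SQ_CQ_OVERFLOW path; the ASSERT_EQ on stat_cb_count then verifies that every overflowed completion is eventually delivered. (mkdir_req, stat_cb_count, loop, ARRAY_SIZE and MAKE_VALGRIND_HAPPY are pre-existing helpers in libuv's test harness.)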
test/test-list.h:

@@ -384,6 +384,7 @@ TEST_DECLARE (fs_futime)
 TEST_DECLARE (fs_lutime)
 TEST_DECLARE (fs_file_open_append)
 TEST_DECLARE (fs_statfs)
+TEST_DECLARE (fs_stat_batch_multiple)
 TEST_DECLARE (fs_stat_missing_path)
 TEST_DECLARE (fs_read_bufs)
 TEST_DECLARE (fs_read_file_eof)
@@ -1070,6 +1071,7 @@ TASK_LIST_START
   TEST_ENTRY (fs_fd_hash)
 #endif
   TEST_ENTRY (fs_statfs)
+  TEST_ENTRY (fs_stat_batch_multiple)
   TEST_ENTRY (fs_stat_missing_path)
   TEST_ENTRY (fs_read_bufs)
   TEST_ENTRY (fs_read_file_eof)