win,pipe: fixes in uv_pipe_connect()
Make Unix and Windows behave consistently when a pipe is closed while it is still connecting, so that both return `UV_ECANCELED`. Also avoid a race condition between `pipe_connect_thread_proc()` and `uv_close()` when accessing `handle->name`. Fixes: https://github.com/libuv/libuv/issues/3578
This commit is contained in:
parent
96637d032f
commit
3706c4f855
@ -377,6 +377,7 @@ typedef struct {
|
||||
ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\
|
||||
HANDLE pipeHandle; \
|
||||
DWORD duplex_flags; \
|
||||
WCHAR* name; \
|
||||
} connect; \
|
||||
} u; \
|
||||
struct uv_req_s* next_req;
|
||||
|
||||
@ -792,15 +792,17 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
|
||||
|
||||
/* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
|
||||
* up to 30 seconds for the pipe to become available with WaitNamedPipe. */
|
||||
while (WaitNamedPipeW(handle->name, 30000)) {
|
||||
while (WaitNamedPipeW(req->u.connect.name, 30000)) {
|
||||
/* The pipe is now available, try to connect. */
|
||||
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
|
||||
pipeHandle = open_named_pipe(req->u.connect.name, &duplex_flags);
|
||||
if (pipeHandle != INVALID_HANDLE_VALUE)
|
||||
break;
|
||||
|
||||
SwitchToThread();
|
||||
}
|
||||
|
||||
uv__free(req->u.connect.name);
|
||||
req->u.connect.name = NULL;
|
||||
if (pipeHandle != INVALID_HANDLE_VALUE) {
|
||||
SET_REQ_SUCCESS(req);
|
||||
req->u.connect.pipeHandle = pipeHandle;
|
||||
@ -828,6 +830,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
|
||||
req->cb = cb;
|
||||
req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
|
||||
req->u.connect.duplex_flags = 0;
|
||||
req->u.connect.name = NULL;
|
||||
|
||||
if (handle->flags & UV_HANDLE_PIPESERVER) {
|
||||
err = ERROR_INVALID_PARAMETER;
|
||||
@ -859,10 +862,19 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
|
||||
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
|
||||
if (pipeHandle == INVALID_HANDLE_VALUE) {
|
||||
if (GetLastError() == ERROR_PIPE_BUSY) {
|
||||
req->u.connect.name = uv__malloc(nameSize);
|
||||
if (!req->u.connect.name) {
|
||||
uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
|
||||
}
|
||||
|
||||
memcpy(req->u.connect.name, handle->name, nameSize);
|
||||
|
||||
/* Wait for the server to make a pipe instance available. */
|
||||
if (!QueueUserWorkItem(&pipe_connect_thread_proc,
|
||||
req,
|
||||
WT_EXECUTELONGFUNCTION)) {
|
||||
uv__free(req->u.connect.name);
|
||||
req->u.connect.name = NULL;
|
||||
err = GetLastError();
|
||||
goto error;
|
||||
}
|
||||
@ -2131,6 +2143,9 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
if (REQ_SUCCESS(req)) {
|
||||
pipeHandle = req->u.connect.pipeHandle;
|
||||
duplex_flags = req->u.connect.duplex_flags;
|
||||
if (handle->flags & UV_HANDLE_CLOSING)
|
||||
err = UV_ECANCELED;
|
||||
else
|
||||
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
|
||||
if (err)
|
||||
CloseHandle(pipeHandle);
|
||||
|
||||
@ -194,6 +194,7 @@ TEST_DECLARE (udp_try_send)
|
||||
TEST_DECLARE (pipe_bind_error_addrinuse)
|
||||
TEST_DECLARE (pipe_bind_error_addrnotavail)
|
||||
TEST_DECLARE (pipe_bind_error_inval)
|
||||
TEST_DECLARE (pipe_connect_close_multiple)
|
||||
TEST_DECLARE (pipe_connect_multiple)
|
||||
TEST_DECLARE (pipe_listen_without_bind)
|
||||
TEST_DECLARE (pipe_bind_or_listen_error_after_close)
|
||||
@ -784,6 +785,7 @@ TASK_LIST_START
|
||||
TEST_ENTRY (pipe_bind_error_addrinuse)
|
||||
TEST_ENTRY (pipe_bind_error_addrnotavail)
|
||||
TEST_ENTRY (pipe_bind_error_inval)
|
||||
TEST_ENTRY (pipe_connect_close_multiple)
|
||||
TEST_ENTRY (pipe_connect_multiple)
|
||||
TEST_ENTRY (pipe_listen_without_bind)
|
||||
TEST_ENTRY (pipe_bind_or_listen_error_after_close)
|
||||
|
||||
@ -105,3 +105,74 @@ TEST_IMPL(pipe_connect_multiple) {
|
||||
MAKE_VALGRIND_HAPPY();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Server-side callback: accept each incoming pipe connection, close it
 * immediately, and tear down the server handle once both the connection
 * and connect callbacks have fired for every client. */
static void connection_cb2(uv_stream_t* server, int status) {
  uv_pipe_t* incoming;

  ASSERT_EQ(status, 0);

  /* The call counter doubles as the index of the next free slot. */
  incoming = &connections[connection_cb_called];
  ASSERT_EQ(uv_pipe_init(server->loop, incoming, 0), 0);
  ASSERT_EQ(uv_accept(server, (uv_stream_t*) incoming), 0);

  uv_close((uv_handle_t*) incoming, NULL);

  connection_cb_called++;
  /* Whichever side finishes last closes the server. */
  if (connection_cb_called == NUM_CLIENTS &&
      connect_cb_called == NUM_CLIENTS) {
    uv_close((uv_handle_t*) &server_handle, NULL);
  }
}
|
||||
|
||||
/* Client-side callback: every connect request is expected to have been
 * canceled by the uv_close() call that raced it, so the status must be
 * UV_ECANCELED on every platform. */
static void connect_cb2(uv_connect_t* connect_req, int status) {
  ASSERT_EQ(status, UV_ECANCELED);

  connect_cb_called++;
  /* Whichever side finishes last closes the server. */
  if (connect_cb_called == NUM_CLIENTS &&
      connection_cb_called == NUM_CLIENTS) {
    uv_close((uv_handle_t*) &server_handle, NULL);
  }
}
|
||||
|
||||
|
||||
TEST_IMPL(pipe_connect_close_multiple) {
|
||||
#if defined(NO_SELF_CONNECT)
|
||||
RETURN_SKIP(NO_SELF_CONNECT);
|
||||
#endif
|
||||
int i;
|
||||
int r;
|
||||
uv_loop_t* loop;
|
||||
|
||||
loop = uv_default_loop();
|
||||
|
||||
r = uv_pipe_init(loop, &server_handle, 0);
|
||||
ASSERT_EQ(r, 0);
|
||||
|
||||
r = uv_pipe_bind(&server_handle, TEST_PIPENAME);
|
||||
ASSERT_EQ(r, 0);
|
||||
|
||||
r = uv_listen((uv_stream_t*)&server_handle, 128, connection_cb2);
|
||||
ASSERT_EQ(r, 0);
|
||||
|
||||
for (i = 0; i < NUM_CLIENTS; i++) {
|
||||
r = uv_pipe_init(loop, &clients[i].pipe_handle, 0);
|
||||
ASSERT_EQ(r, 0);
|
||||
uv_pipe_connect(&clients[i].conn_req,
|
||||
&clients[i].pipe_handle,
|
||||
TEST_PIPENAME,
|
||||
connect_cb2);
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_CLIENTS; i++) {
|
||||
uv_close((uv_handle_t*)&clients[i].pipe_handle, NULL);
|
||||
}
|
||||
|
||||
|
||||
uv_run(loop, UV_RUN_DEFAULT);
|
||||
|
||||
ASSERT_EQ(connection_cb_called, NUM_CLIENTS);
|
||||
ASSERT_EQ(connect_cb_called, NUM_CLIENTS);
|
||||
|
||||
MAKE_VALGRIND_HAPPY();
|
||||
return 0;
|
||||
}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user