uv-win: refactor request processing

This commit is contained in:
Bert Belder 2011-07-13 21:49:15 +02:00
parent 9c19391536
commit 28650425e2
2 changed files with 399 additions and 324 deletions

View File

@ -140,8 +140,8 @@ typedef enum {
UV_CHECK,
UV_IDLE,
UV_ASYNC,
UV_ARES,
UV_ARES_TASK,
UV_ARES_EVENT,
UV_GETADDRINFO
} uv_handle_type;
@ -152,7 +152,11 @@ typedef enum {
UV_READ,
UV_WRITE,
UV_SHUTDOWN,
UV_WAKEUP
UV_WAKEUP,
/* TODO: remove the req suffix */
UV_ARES_EVENT_REQ,
UV_ARES_CLEANUP_REQ,
UV_GETADDRINFO_REQ
} uv_req_type;

View File

@ -239,8 +239,8 @@ struct uv_ares_action_s {
int write;
};
void uv_ares_process(uv_ares_action_t* handle, uv_req_t* req);
void uv_ares_task_cleanup(uv_ares_task_t* handle, uv_req_t* req);
void uv_process_ares_event_req(uv_ares_action_t* handle, uv_req_t* req);
void uv_process_ares_cleanup_req(uv_ares_task_t* handle, uv_req_t* req);
void uv_ares_poll(uv_timer_t* handle, int status);
static void close_pipe(uv_pipe_t* handle, int* status, uv_err_t* err);
@ -263,7 +263,7 @@ static uv_ares_channel_t uv_ares_data = { NULL, 0 };
/* getaddrinfo integration */
static void uv_getaddrinfo_done(uv_getaddrinfo_t* handle, uv_req_t* req);
static void uv_process_getaddrinfo_req(uv_getaddrinfo_t* handle, uv_req_t* req);
/* Adjust a size value to be a multiple of 4. Used to keep pointers aligned. */
/* Do we need different versions of this for different architectures? */
#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2)
@ -1558,7 +1558,18 @@ int uv_shutdown(uv_req_t* req) {
}
static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
/*
 * Decrement the handle's pending-request count and, when the handle is
 * closing and this was its last outstanding request, queue the handle's
 * endgame (close-completion) processing.
 * Note: `handle` is evaluated multiple times; pass a plain lvalue.
 */
#define DECREASE_PENDING_REQ_COUNT(handle)            \
  do {                                                \
    (handle)->reqs_pending--;                         \
                                                      \
    if (((handle)->flags & UV_HANDLE_CLOSING) &&      \
        (handle)->reqs_pending == 0) {                \
      uv_want_endgame((uv_handle_t*) (handle));       \
    }                                                 \
  } while (0)
static void uv_process_tcp_read_req(uv_tcp_t* handle, uv_req_t* req) {
DWORD bytes, flags, err;
uv_buf_t buf;
@ -1567,160 +1578,173 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
/* Mark the request non-pending */
req->flags &= ~UV_REQ_PENDING;
switch (req->type) {
case UV_WRITE:
handle->write_queue_size -= req->queued_bytes;
if (req->cb) {
uv_last_error_ = req->error;
((uv_write_cb)req->cb)(req, uv_last_error_.code == UV_OK ? 0 : -1);
}
handle->write_reqs_pending--;
if (handle->write_reqs_pending == 0 &&
handle->flags & UV_HANDLE_SHUTTING) {
uv_want_endgame((uv_handle_t*)handle);
}
break;
case UV_READ:
if (req->error.code != UV_OK) {
/* An error occurred doing the 0-read. */
if (!(handle->flags & UV_HANDLE_READING)) {
break;
}
/* Stop reading and report error. */
handle->flags &= ~UV_HANDLE_READING;
uv_last_error_ = req->error;
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
break;
}
/* Do nonblocking reads until the buffer is empty */
while (handle->flags & UV_HANDLE_READING) {
buf = handle->alloc_cb((uv_stream_t*)handle, 65536);
assert(buf.len > 0);
flags = 0;
if (WSARecv(handle->socket,
(WSABUF*)&buf,
1,
&bytes,
&flags,
NULL,
NULL) != SOCKET_ERROR) {
if (bytes > 0) {
/* Successful read */
handle->read_cb((uv_stream_t*)handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
}
} else {
/* Connection closed */
handle->flags &= ~UV_HANDLE_READING;
handle->flags |= UV_HANDLE_EOF;
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
handle->read_cb((uv_stream_t*)handle, -1, buf);
if (req->error.code != UV_OK) {
/* An error occurred doing the 0-read. */
if ((handle->flags & UV_HANDLE_READING)) {
handle->flags &= ~UV_HANDLE_READING;
uv_last_error_ = req->error;
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
} else {
/* Do nonblocking reads until the buffer is empty */
while (handle->flags & UV_HANDLE_READING) {
buf = handle->alloc_cb((uv_stream_t*)handle, 65536);
assert(buf.len > 0);
flags = 0;
if (WSARecv(handle->socket,
(WSABUF*)&buf,
1,
&bytes,
&flags,
NULL,
NULL) != SOCKET_ERROR) {
if (bytes > 0) {
/* Successful read */
handle->read_cb((uv_stream_t*)handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
}
} else {
err = WSAGetLastError();
if (err == WSAEWOULDBLOCK) {
/* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(WSAEWOULDBLOCK);
handle->read_cb((uv_stream_t*)handle, 0, buf);
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
/* Connection closed */
handle->flags &= ~UV_HANDLE_READING;
handle->flags |= UV_HANDLE_EOF;
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
handle->read_cb((uv_stream_t*)handle, -1, buf);
break;
}
}
/* Post another 0-read if still reading and not closing. */
if (handle->flags & UV_HANDLE_READING) {
uv_tcp_queue_read(handle);
}
break;
case UV_ACCEPT:
/* If handle->accepted_socket is not a valid socket, then */
/* uv_queue_accept must have failed. This is a serious error. We stop */
/* accepting connections and report this error to the connection */
/* callback. */
if (handle->accept_socket == INVALID_SOCKET) {
if (!(handle->flags & UV_HANDLE_LISTENING)) {
break;
}
handle->flags &= ~UV_HANDLE_LISTENING;
if (handle->connection_cb) {
uv_last_error_ = req->error;
handle->connection_cb((uv_handle_t*)handle, -1);
} else {
err = WSAGetLastError();
if (err == WSAEWOULDBLOCK) {
/* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(WSAEWOULDBLOCK);
handle->read_cb((uv_stream_t*)handle, 0, buf);
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
break;
}
}
if (req->error.code == UV_OK &&
setsockopt(handle->accept_socket,
SOL_SOCKET,
SO_UPDATE_ACCEPT_CONTEXT,
(char*)&handle->socket,
sizeof(handle->socket)) == 0) {
/* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
if (handle->connection_cb) {
handle->connection_cb((uv_handle_t*)handle, 0);
}
} else {
/* Error related to accepted socket is ignored because the server */
/* socket may still be healthy. If the server socket is broken
/* uv_queue_accept will detect it. */
closesocket(handle->accept_socket);
if (handle->flags & UV_HANDLE_LISTENING) {
uv_tcp_queue_accept(handle);
}
}
break;
case UV_CONNECT:
if (req->cb) {
if (req->error.code == UV_OK) {
if (setsockopt(handle->socket,
SOL_SOCKET,
SO_UPDATE_CONNECT_CONTEXT,
NULL,
0) == 0) {
uv_init_connection((uv_stream_t*)handle);
((uv_connect_cb)req->cb)(req, 0);
} else {
uv_set_sys_error(WSAGetLastError());
((uv_connect_cb)req->cb)(req, -1);
}
} else {
uv_last_error_ = req->error;
((uv_connect_cb)req->cb)(req, -1);
}
}
break;
default:
assert(0);
/* Post another 0-read if still reading and not closing. */
if (handle->flags & UV_HANDLE_READING) {
uv_tcp_queue_read(handle);
}
}
/* The number of pending requests is now down by one */
handle->reqs_pending--;
/* Queue the handle's close callback if it is closing and there are no */
/* more pending requests. */
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
uv_want_endgame((uv_handle_t*)handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
static void uv_pipe_return_req(uv_pipe_t* handle, uv_req_t* req) {
/*
 * Completion handler for a finished TCP write request: runs the user's
 * write callback with the stored result, lets a pending shutdown proceed
 * once all writes have drained, and releases the pending-request slot.
 */
static void uv_process_tcp_write_req(uv_tcp_t* handle, uv_req_t* req) {
  assert(handle->type == UV_TCP);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  handle->write_queue_size -= req->queued_bytes;

  /* Report the request's stored result to the user's callback. */
  if (req->cb) {
    uv_last_error_ = req->error;
    ((uv_write_cb)req->cb)(req, uv_last_error_.code == UV_OK ? 0 : -1);
  }

  /* A pending shutdown may continue once no writes are outstanding. */
  handle->write_reqs_pending--;
  if (handle->flags & UV_HANDLE_SHUTTING &&
      handle->write_reqs_pending == 0) {
    uv_want_endgame((uv_handle_t*)handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * Completion handler for a finished TCP accept request: reports a new
 * connection (or a fatal accept failure) to the connection callback and
 * queues the next accept when still listening.
 */
static void uv_process_tcp_accept_req(uv_tcp_t* handle, uv_req_t* req) {
  assert(handle->type == UV_TCP);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  /* If handle->accepted_socket is not a valid socket, then */
  /* uv_queue_accept must have failed. This is a serious error. We stop */
  /* accepting connections and report this error to the connection */
  /* callback. */
  if (handle->accept_socket == INVALID_SOCKET) {
    if (handle->flags & UV_HANDLE_LISTENING) {
      handle->flags &= ~UV_HANDLE_LISTENING;
      if (handle->connection_cb) {
        uv_last_error_ = req->error;
        handle->connection_cb((uv_handle_t*)handle, -1);
      }
    }
  } else if (req->error.code == UV_OK &&
      setsockopt(handle->accept_socket,
                 SOL_SOCKET,
                 SO_UPDATE_ACCEPT_CONTEXT,
                 (char*)&handle->socket,
                 sizeof(handle->socket)) == 0) {
    /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
    if (handle->connection_cb) {
      handle->connection_cb((uv_handle_t*)handle, 0);
    }
  } else {
    /* Error related to accepted socket is ignored because the server */
    /* socket may still be healthy. If the server socket is broken, */
    /* uv_queue_accept will detect it. */
    closesocket(handle->accept_socket);
    if (handle->flags & UV_HANDLE_LISTENING) {
      uv_tcp_queue_accept(handle);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * Completion handler for a finished TCP connect request: on success,
 * updates the connect context, initializes the connection and reports 0
 * to the connect callback; on failure, reports -1.
 */
static void uv_process_tcp_connect_req(uv_tcp_t* handle, uv_req_t* req) {
  assert(handle->type == UV_TCP);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  if (req->cb) {
    if (req->error.code == UV_OK) {
      if (setsockopt(handle->socket,
                     SOL_SOCKET,
                     SO_UPDATE_CONNECT_CONTEXT,
                     NULL,
                     0) == 0) {
        uv_init_connection((uv_stream_t*)handle);
        ((uv_connect_cb)req->cb)(req, 0);
      } else {
        uv_set_sys_error(WSAGetLastError());
        ((uv_connect_cb)req->cb)(req, -1);
      }
    } else {
      uv_last_error_ = req->error;
      ((uv_connect_cb)req->cb)(req, -1);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
static void uv_process_pipe_read_req(uv_pipe_t* handle, uv_req_t* req) {
DWORD bytes, err, mode;
uv_buf_t buf;
uv_pipe_instance_t* acceptingConn;
@ -1730,148 +1754,173 @@ static void uv_pipe_return_req(uv_pipe_t* handle, uv_req_t* req) {
/* Mark the request non-pending */
req->flags &= ~UV_REQ_PENDING;
switch (req->type) {
case UV_WRITE:
handle->write_queue_size -= req->queued_bytes;
if (req->cb) {
uv_last_error_ = req->error;
((uv_write_cb)req->cb)(req, uv_last_error_.code == UV_OK ? 0 : -1);
}
handle->write_reqs_pending--;
if (handle->write_reqs_pending == 0 &&
handle->flags & UV_HANDLE_SHUTTING) {
uv_want_endgame((uv_handle_t*)handle);
}
break;
if (req->error.code != UV_OK) {
/* An error occurred doing the 0-read. */
if (handle->flags & UV_HANDLE_READING) {
/* Stop reading and report error. */
handle->flags &= ~UV_HANDLE_READING;
uv_last_error_ = req->error;
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
} else {
/*
* Temporarily switch to non-blocking mode.
* This is so that ReadFile doesn't block if the read buffer is empty.
*/
mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_NOWAIT;
if (!SetNamedPipeHandleState(handle->connection->handle, &mode, NULL, NULL)) {
/* We can't continue processing this read. */
handle->flags &= ~UV_HANDLE_READING;
uv_set_sys_error(GetLastError());
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
case UV_READ:
if (req->error.code != UV_OK) {
/* An error occurred doing the 0-read. */
if (!(handle->flags & UV_HANDLE_READING)) {
break;
}
/* Do non-blocking reads until the buffer is empty */
while (handle->flags & UV_HANDLE_READING) {
buf = handle->alloc_cb((uv_stream_t*)handle, 65536);
assert(buf.len > 0);
/* Stop reading and report error. */
handle->flags &= ~UV_HANDLE_READING;
uv_last_error_ = req->error;
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
break;
}
/* Temporarily switch to non-blocking mode.
* This is so that ReadFile doesn't block if the read buffer is empty.
*/
mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_NOWAIT;
if (!SetNamedPipeHandleState(handle->connection->handle, &mode, NULL, NULL)) {
/* We can't continue processing this read. */
err = GetLastError();
uv_set_sys_error(err);
handle->read_cb((uv_stream_t*)handle, -1, buf);
break;
}
/* Do non-blocking reads until the buffer is empty */
while (handle->flags & UV_HANDLE_READING) {
buf = handle->alloc_cb((uv_stream_t*)handle, 65536);
assert(buf.len > 0);
if (ReadFile(handle->connection->handle,
buf.base,
buf.len,
&bytes,
NULL)) {
if (bytes > 0) {
/* Successful read */
handle->read_cb((uv_stream_t*)handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
}
} else {
/* Connection closed */
handle->flags &= ~UV_HANDLE_READING;
handle->flags |= UV_HANDLE_EOF;
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
handle->read_cb((uv_stream_t*)handle, -1, buf);
if (ReadFile(handle->connection->handle,
buf.base,
buf.len,
&bytes,
NULL)) {
if (bytes > 0) {
/* Successful read */
handle->read_cb((uv_stream_t*)handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
}
} else {
err = GetLastError();
if (err == ERROR_NO_DATA) {
/* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(UV_EAGAIN);
handle->read_cb((uv_stream_t*)handle, 0, buf);
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
break;
}
}
if (handle->flags & UV_HANDLE_READING) {
/* Switch back to blocking mode so that we can use IOCP for 0-reads */
mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT;
if (!SetNamedPipeHandleState(handle->connection->handle, &mode, NULL, NULL)) {
/* Report and continue. */
err = GetLastError();
uv_set_sys_error(err);
/* Connection closed */
handle->flags &= ~UV_HANDLE_READING;
handle->flags |= UV_HANDLE_EOF;
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
handle->read_cb((uv_stream_t*)handle, -1, buf);
break;
}
/* Post another 0-read if still reading and not closing. */
uv_pipe_queue_read(handle);
}
break;
case UV_ACCEPT:
if (req->error.code == UV_OK) {
/* Put the connection instance into accept state */
handle->acceptConnection = uv_req_to_pipeinstance(req);
handle->acceptConnection->state = UV_PIPEINSTANCE_ACCEPTED;
if (handle->connection_cb) {
handle->connection_cb((uv_handle_t*)handle, 0);
}
} else {
/* Ignore errors and continue listening */
if (handle->flags & UV_HANDLE_LISTENING) {
uv_pipe_queue_accept(handle);
}
}
break;
case UV_CONNECT:
if (req->cb) {
if (req->error.code == UV_OK) {
uv_init_connection((uv_stream_t*)handle);
((uv_connect_cb)req->cb)(req, 0);
err = GetLastError();
if (err == ERROR_NO_DATA) {
/* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(UV_EAGAIN);
handle->read_cb((uv_stream_t*)handle, 0, buf);
} else {
uv_last_error_ = req->error;
((uv_connect_cb)req->cb)(req, -1);
/* Ouch! serious error. */
uv_set_sys_error(err);
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
break;
}
break;
}
default:
assert(0);
/* TODO: if the read callback stops reading we can't start reading again
because the pipe will still be in nowait mode. */
if (handle->flags & UV_HANDLE_READING) {
/* Switch back to blocking mode so that we can use IOCP for 0-reads */
mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT;
if (SetNamedPipeHandleState(handle->connection->handle, &mode, NULL, NULL)) {
/* Post another 0-read */
uv_pipe_queue_read(handle);
} else {
/* Report and continue. */
/* We can't continue processing this read. */
handle->flags &= ~UV_HANDLE_READING;
uv_set_sys_error(GetLastError());
buf.base = 0;
buf.len = 0;
handle->read_cb((uv_stream_t*)handle, -1, buf);
}
}
}
/* The number of pending requests is now down by one */
handle->reqs_pending--;
DECREASE_PENDING_REQ_COUNT(handle);
}
/* Queue the handle's close callback if it is closing and there are no */
/* more pending requests. */
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
/*
 * Completion handler for a finished pipe write request: runs the user's
 * write callback with the stored result, lets a pending shutdown proceed
 * once all writes have drained, and releases the pending-request slot.
 */
static void uv_process_pipe_write_req(uv_pipe_t* handle, uv_req_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  handle->write_queue_size -= req->queued_bytes;

  /* Report the request's stored result to the user's callback. */
  if (req->cb) {
    uv_last_error_ = req->error;
    ((uv_write_cb)req->cb)(req, uv_last_error_.code == UV_OK ? 0 : -1);
  }

  /* A pending shutdown may continue once no writes are outstanding. */
  handle->write_reqs_pending--;
  if (handle->write_reqs_pending == 0 &&
      handle->flags & UV_HANDLE_SHUTTING) {
    uv_want_endgame((uv_handle_t*)handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * Completion handler for a finished pipe accept request: on success, moves
 * the pipe instance into the accepted state and notifies the connection
 * callback; on failure, errors are ignored and listening continues.
 */
static void uv_process_pipe_accept_req(uv_pipe_t* handle, uv_req_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  if (req->error.code == UV_OK) {
    /* Put the connection instance into accept state */
    handle->acceptConnection = uv_req_to_pipeinstance(req);
    handle->acceptConnection->state = UV_PIPEINSTANCE_ACCEPTED;

    if (handle->connection_cb) {
      handle->connection_cb((uv_handle_t*)handle, 0);
    }
  } else {
    /* Ignore errors and continue listening */
    if (handle->flags & UV_HANDLE_LISTENING) {
      uv_pipe_queue_accept(handle);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * Completion handler for a finished pipe connect request: on success,
 * initializes the connection and reports 0 to the connect callback; on
 * failure, reports -1 with the stored error.
 */
static void uv_process_pipe_connect_req(uv_pipe_t* handle, uv_req_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  /* Mark the request non-pending */
  req->flags &= ~UV_REQ_PENDING;

  if (req->cb) {
    if (req->error.code == UV_OK) {
      uv_init_connection((uv_stream_t*)handle);
      ((uv_connect_cb)req->cb)(req, 0);
    } else {
      uv_last_error_ = req->error;
      ((uv_connect_cb)req->cb)(req, -1);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
@ -2147,7 +2196,7 @@ int uv_async_send(uv_async_t* handle) {
}
static void uv_async_return_req(uv_async_t* handle, uv_req_t* req) {
static void uv_process_async_wakeup_req(uv_async_t* handle, uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
@ -2161,36 +2210,58 @@ static void uv_async_return_req(uv_async_t* handle, uv_req_t* req) {
}
/*
 * Dispatch a completed stream request to the handler matching the handle's
 * concrete type. `method` is token-pasted into the handler name
 * (read/write/accept/connect) and therefore cannot be parenthesized;
 * `req` is evaluated multiple times.
 */
#define DELEGATE_STREAM_REQ(req, method)                                     \
  do {                                                                       \
    switch ((req)->handle->type) {                                           \
      case UV_TCP:                                                           \
        uv_process_tcp_##method##_req((uv_tcp_t*) (req)->handle, (req));     \
        break;                                                               \
                                                                             \
      case UV_NAMED_PIPE:                                                    \
        uv_process_pipe_##method##_req((uv_pipe_t*) (req)->handle, (req));   \
        break;                                                               \
                                                                             \
      default:                                                               \
        assert(0);                                                           \
    }                                                                        \
  } while (0)
static void uv_process_reqs() {
uv_req_t* req;
uv_handle_t* handle;
while (req = uv_remove_pending_req()) {
handle = req->handle;
switch (handle->type) {
case UV_TCP:
uv_tcp_return_req((uv_tcp_t*)handle, req);
switch (req->type) {
case UV_READ:
DELEGATE_STREAM_REQ(req, read);
break;
case UV_NAMED_PIPE:
uv_pipe_return_req((uv_pipe_t*)handle, req);
case UV_WRITE:
DELEGATE_STREAM_REQ(req, write);
break;
case UV_ASYNC:
uv_async_return_req((uv_async_t*)handle, req);
case UV_ACCEPT:
DELEGATE_STREAM_REQ(req, accept);
break;
case UV_ARES:
uv_ares_process((uv_ares_action_t*)handle, req);
case UV_CONNECT:
DELEGATE_STREAM_REQ(req, connect);
break;
case UV_ARES_TASK:
uv_ares_task_cleanup((uv_ares_task_t*)handle, req);
case UV_WAKEUP:
uv_process_async_wakeup_req((uv_async_t*) req->handle, req);
break;
case UV_GETADDRINFO:
uv_getaddrinfo_done((uv_getaddrinfo_t*)handle, req);
case UV_ARES_EVENT_REQ:
uv_process_ares_event_req((uv_ares_action_t*) req->handle, req);
break;
case UV_ARES_CLEANUP_REQ:
uv_process_ares_cleanup_req((uv_ares_task_t*) req->handle, req);
break;
case UV_GETADDRINFO_REQ:
uv_process_getaddrinfo_req((uv_getaddrinfo_t*) req->handle, req);
break;
default:
@ -2416,7 +2487,7 @@ VOID CALLBACK uv_ares_socksignal_tp(void* parameter, BOOLEAN timerfired) {
if (selhandle == NULL) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
selhandle->type = UV_ARES;
selhandle->type = UV_ARES_EVENT;
selhandle->close_cb = NULL;
selhandle->data = sockhandle->data;
selhandle->sock = sockhandle->sock;
@ -2425,7 +2496,7 @@ VOID CALLBACK uv_ares_socksignal_tp(void* parameter, BOOLEAN timerfired) {
uv_ares_req = &selhandle->ares_req;
uv_req_init(uv_ares_req, (uv_handle_t*)selhandle, NULL);
uv_ares_req->type = UV_WAKEUP;
uv_ares_req->type = UV_ARES_EVENT_REQ;
/* post notification that ares needs to be called */
if (!PostQueuedCompletionStatus(uv_iocp_,
@ -2473,7 +2544,7 @@ void uv_ares_sockstate_cb(void *data, ares_socket_t sock, int read, int write) {
/* Post request to cleanup the Task */
uv_ares_req = &uv_handle_ares->ares_req;
uv_req_init(uv_ares_req, (uv_handle_t*)uv_handle_ares, NULL);
uv_ares_req->type = UV_WAKEUP;
uv_ares_req->type = UV_ARES_CLEANUP_REQ;
/* post ares done with socket - finish cleanup when all threads done. */
if (!PostQueuedCompletionStatus(uv_iocp_,
@ -2548,7 +2619,7 @@ void uv_ares_sockstate_cb(void *data, ares_socket_t sock, int read, int write) {
}
/* called via uv_poll when ares completion port signaled */
void uv_ares_process(uv_ares_action_t* handle, uv_req_t* req) {
void uv_process_ares_event_req(uv_ares_action_t* handle, uv_req_t* req) {
uv_ares_channel_t* uv_ares_data_ptr = (uv_ares_channel_t*)handle->data;
ares_process_fd(uv_ares_data_ptr->channel,
@ -2560,7 +2631,7 @@ void uv_ares_process(uv_ares_action_t* handle, uv_req_t* req) {
}
/* called via uv_poll when ares is finished with socket */
void uv_ares_task_cleanup(uv_ares_task_t* handle, uv_req_t* req) {
void uv_process_ares_cleanup_req(uv_ares_task_t* handle, uv_req_t* req) {
/* check for event complete without waiting */
unsigned int signaled = WaitForSingleObject(handle->h_close_event, 0);
@ -2692,7 +2763,7 @@ static DWORD WINAPI getaddrinfo_thread_proc(void* parameter) {
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
static void uv_getaddrinfo_done(uv_getaddrinfo_t* handle, uv_req_t* req) {
static void uv_process_getaddrinfo_req(uv_getaddrinfo_t* handle, uv_req_t* req) {
int addrinfo_len = 0;
int name_len = 0;
size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
@ -2903,7 +2974,7 @@ int uv_getaddrinfo(uv_getaddrinfo_t* handle,
/* init request for Post handling */
uv_req_init(&handle->getadddrinfo_req, (uv_handle_t*)handle, NULL);
handle->getadddrinfo_req.type = UV_WAKEUP;
handle->getadddrinfo_req.type = UV_GETADDRINFO_REQ;
/* Ask thread to run. Treat this as a long operation */
if (QueueUserWorkItem(&getaddrinfo_thread_proc, handle, WT_EXECUTELONGFUNCTION) == 0) {