diff --git a/test/benchmark-ping-pongs.c b/test/benchmark-ping-pongs.c
index 1bda3377..49019736 100644
--- a/test/benchmark-ping-pongs.c
+++ b/test/benchmark-ping-pongs.c
@@ -33,7 +33,7 @@
 typedef struct {
   int pongs;
   int state;
-  uv_handle_t handle;
+  uv_tcp_t tcp;
   uv_req_t connect_req;
   uv_req_t shutdown_req;
 } pinger_t;
@@ -52,7 +52,7 @@ static int completed_pingers = 0;
 static int64_t start_time;
 
 
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
+static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
   buf_t* ab;
 
   ab = buf_freelist;
@@ -107,7 +107,7 @@ static void pinger_write_ping(pinger_t* pinger) {
   buf.len = strlen(PING);
 
   req = (uv_req_t*)malloc(sizeof(*req));
-  uv_req_init(req, &pinger->handle, pinger_write_cb);
+  uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_write_cb);
 
   if (uv_write(req, &buf, 1)) {
     FATAL("uv_write failed");
@@ -120,11 +120,11 @@ static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
 }
 
 
-static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
   unsigned int i;
   pinger_t* pinger;
 
-  pinger = (pinger_t*)handle->data;
+  pinger = (pinger_t*)tcp->data;
 
   if (nread < 0) {
     ASSERT(uv_last_error().code == UV_EOF);
@@ -143,7 +143,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
     if (pinger->state == 0) {
       pinger->pongs++;
       if (uv_now() - start_time > TIME) {
-        uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
+        uv_req_init(&pinger->shutdown_req, (uv_handle_t*)tcp, pinger_shutdown_cb);
         uv_shutdown(&pinger->shutdown_req);
         break;
         return;
@@ -164,7 +164,7 @@ static void pinger_connect_cb(uv_req_t *req, int status) {
 
   pinger_write_ping(pinger);
 
-  if (uv_read_start(req->handle, pinger_read_cb)) {
+  if (uv_read_start((uv_tcp_t*)(req->handle), pinger_read_cb)) {
     FATAL("uv_read_start failed");
   }
 }
@@ -181,14 +181,15 @@ static void pinger_new() {
   pinger->pongs = 0;
 
   /* Try to connec to the server and do NUM_PINGS ping-pongs. */
-  r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
+  r = uv_tcp_init(&pinger->tcp, pinger_close_cb, (void*)pinger);
   ASSERT(!r);
 
   /* We are never doing multiple reads/connects at a time anyway. */
   /* so these handles can be pre-initialized. */
-  uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
+  uv_req_init(&pinger->connect_req, (uv_handle_t*)&pinger->tcp,
+      pinger_connect_cb);
 
-  uv_bind(&pinger->handle, (struct sockaddr*)&client_addr);
+  uv_bind(&pinger->tcp, (struct sockaddr*)&client_addr);
   r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
   ASSERT(!r);
 }
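The hunks above establish the call-site convention this patch applies everywhere: handles are declared with their concrete type (uv_tcp_t) and explicitly upcast to uv_handle_t* only at the generic entry points such as uv_req_init() and uv_close(). A minimal sketch of that convention, assuming the API exactly as shown in this diff (the *_sketch names are hypothetical):

/* Sketch only; assumes the API as it appears in this patch. */
static uv_tcp_t tcp_sketch;
static uv_req_t connect_req_sketch;

static void on_connect_sketch(uv_req_t* req, int status) {
  if (status) {
    /* Generic operations take the base type, so upcast explicitly. */
    uv_close((uv_handle_t*)&tcp_sketch);
  }
}

static void connect_sketch(struct sockaddr* addr) {
  uv_tcp_init(&tcp_sketch, NULL, NULL);             /* typed init */
  uv_req_init(&connect_req_sketch, (uv_handle_t*)&tcp_sketch,
      on_connect_sketch);                           /* upcast at the req */
  uv_connect(&connect_req_sketch, addr);            /* typed underneath */
}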
diff --git a/test/benchmark-pump.c b/test/benchmark-pump.c
index 72daa331..2249aa99 100644
--- a/test/benchmark-pump.c
+++ b/test/benchmark-pump.c
@@ -35,17 +35,17 @@ static int TARGET_CONNECTIONS;
 #define STATS_COUNT 5
 
 
-static void do_write(uv_handle_t* handle);
+static void do_write(uv_tcp_t*);
 static void maybe_connect_some();
 
 static uv_req_t* req_alloc();
 static void req_free(uv_req_t* uv_req);
 
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size);
+static uv_buf_t buf_alloc(uv_tcp_t*, size_t size);
 static void buf_free(uv_buf_t uv_buf_t);
 
 
-static uv_handle_t server;
+static uv_tcp_t server;
 static struct sockaddr_in listen_addr;
 static struct sockaddr_in connect_addr;
@@ -68,9 +68,9 @@ static char write_buffer[WRITE_BUFFER_SIZE];
 
 /* Make this as large as you need. */
 #define MAX_WRITE_HANDLES 1000
 
-static uv_handle_t write_handles[MAX_WRITE_HANDLES];
+static uv_tcp_t write_handles[MAX_WRITE_HANDLES];
 
-static uv_handle_t timer_handle;
+static uv_timer_t timer_handle;
 
 
 static double gbit(int64_t bytes, int64_t passed_ms) {
@@ -136,7 +136,7 @@ void read_sockets_close_cb(uv_handle_t* handle, int status) {
   */
   if (uv_now() - start_time > 1000 && read_sockets == 0) {
     read_show_stats();
-    uv_close(&server);
+    uv_close((uv_handle_t*)&server);
   }
 }
 
@@ -157,7 +157,7 @@ static void start_stats_collection() {
 }
 
 
-static void read_cb(uv_handle_t* handle, int bytes, uv_buf_t buf) {
+static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
   if (nrecv_total == 0) {
     ASSERT(start_time == 0);
     uv_update_time();
@@ -165,7 +165,7 @@ static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
   }
 
   if (bytes < 0) {
-    uv_close(handle);
+    uv_close((uv_handle_t*)tcp);
     return;
   }
 
@@ -186,11 +186,11 @@ static void write_cb(uv_req_t *req, int status) {
   nsent += sizeof write_buffer;
   nsent_total += sizeof write_buffer;
 
-  do_write(req->handle);
+  do_write((uv_tcp_t*)req->handle);
 }
 
 
-static void do_write(uv_handle_t* handle) {
+static void do_write(uv_tcp_t* tcp) {
   uv_req_t* req;
   uv_buf_t buf;
   int r;
@@ -198,9 +198,9 @@
   buf.base = (char*) &write_buffer;
   buf.len = sizeof write_buffer;
 
-  while (handle->write_queue_size == 0) {
+  while (tcp->write_queue_size == 0) {
     req = req_alloc();
-    uv_req_init(req, handle, write_cb);
+    uv_req_init(req, (uv_handle_t*)tcp, write_cb);
 
     r = uv_write(req, &buf, 1);
     ASSERT(r == 0);
@@ -232,36 +232,36 @@ static void connect_cb(uv_req_t* req, int status) {
 
 static void maybe_connect_some() {
   uv_req_t* req;
-  uv_handle_t* handle;
+  uv_tcp_t* tcp;
   int r;
 
   while (max_connect_socket < TARGET_CONNECTIONS &&
       max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
-    handle = &write_handles[max_connect_socket++];
+    tcp = &write_handles[max_connect_socket++];
 
-    r = uv_tcp_init(handle, write_sockets_close_cb, NULL);
+    r = uv_tcp_init(tcp, write_sockets_close_cb, NULL);
     ASSERT(r == 0);
 
     req = req_alloc();
-    uv_req_init(req, handle, connect_cb);
+    uv_req_init(req, (uv_handle_t*)tcp, connect_cb);
     r = uv_connect(req, (struct sockaddr*) &connect_addr);
     ASSERT(r == 0);
   }
 }
 
 
-static void accept_cb(uv_handle_t* s) {
-  uv_handle_t* handle;
+static void accept_cb(uv_tcp_t* s) {
+  uv_tcp_t* tcp;
   int r;
 
   ASSERT(&server == s);
 
-  handle = malloc(sizeof(uv_handle_t));
+  tcp = malloc(sizeof(uv_tcp_t));
 
-  r = uv_accept(s, handle, read_sockets_close_cb, NULL);
+  r = uv_accept(s, tcp, read_sockets_close_cb, NULL);
   ASSERT(r == 0);
 
-  r = uv_read_start(handle, read_cb);
+  r = uv_read_start(tcp, read_cb);
   ASSERT(r == 0);
 
   read_sockets++;
@@ -317,7 +317,7 @@ typedef struct buf_list_s {
 static buf_list_t* buf_freelist = NULL;
 
 
-static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
+static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
   buf_list_t* buf;
 
   buf = buf_freelist;
diff --git a/test/benchmark-sizes.c b/test/benchmark-sizes.c
index cc8f7d43..a5f573fe 100644
--- a/test/benchmark-sizes.c
+++ b/test/benchmark-sizes.c
@@ -24,7 +24,12 @@
 
 
 BENCHMARK_IMPL(sizes) {
-  LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
   LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
+  LOGF("uv_tcp_t: %lu bytes\n", sizeof(uv_tcp_t));
+  LOGF("uv_prepare_t: %lu bytes\n", sizeof(uv_prepare_t));
+  LOGF("uv_check_t: %lu bytes\n", sizeof(uv_check_t));
+  LOGF("uv_idle_t: %lu bytes\n", sizeof(uv_idle_t));
+  LOGF("uv_async_t: %lu bytes\n", sizeof(uv_async_t));
+  LOGF("uv_timer_t: %lu bytes\n", sizeof(uv_timer_t));
   return 0;
 }
diff --git a/test/echo-server.c b/test/echo-server.c
index 5dd6a42b..04ee796f 100644
--- a/test/echo-server.c
+++ b/test/echo-server.c
@@ -32,13 +32,13 @@ typedef struct {
 
 static int server_closed;
-static uv_handle_t server;
+static uv_tcp_t server;
 
 static void after_write(uv_req_t* req, int status);
-static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf);
+static void after_read(uv_tcp_t*, int nread, uv_buf_t buf);
 static void on_close(uv_handle_t* peer, int status);
-static void on_accept(uv_handle_t* handle);
+static void on_accept(uv_tcp_t*);
 
 
 static void after_write(uv_req_t* req, int status) {
@@ -64,7 +64,7 @@ static void after_shutdown(uv_req_t* req, int status) {
 }
 
 
-static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
   int i;
   write_req_t *wr;
   uv_req_t* req;
@@ -78,7 +78,7 @@ static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
     }
 
     req = (uv_req_t*) malloc(sizeof *req);
-    uv_req_init(req, handle, after_shutdown);
+    uv_req_init(req, (uv_handle_t*)handle, after_shutdown);
     uv_shutdown(req);
 
     return;
@@ -94,7 +94,7 @@ static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
   if (!server_closed) {
     for (i = 0; i < nread; i++) {
      if (buf.base[i] == 'Q') {
-        uv_close(&server);
+        uv_close((uv_handle_t*)&server);
        server_closed = 1;
      }
    }
@@ -102,7 +102,7 @@ static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
 
   wr = (write_req_t*) malloc(sizeof *wr);
 
-  uv_req_init(&wr->req, handle, after_write);
+  uv_req_init(&wr->req, (uv_handle_t*)handle, after_write);
   wr->buf.base = buf.base;
   wr->buf.len = nread;
   if (uv_write(&wr->req, &wr->buf, 1)) {
@@ -118,8 +118,8 @@ static void on_close(uv_handle_t* peer, int status) {
 }
 
 
-static void on_accept(uv_handle_t* server) {
-  uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle);
+static void on_accept(uv_tcp_t* server) {
+  uv_tcp_t* handle = (uv_tcp_t*) malloc(sizeof *handle);
 
   if (uv_accept(server, handle, on_close, NULL)) {
     FATAL("uv_accept failed");
@@ -130,7 +130,7 @@
 
 static void on_server_close(uv_handle_t* handle, int status) {
-  ASSERT(handle == &server);
+  ASSERT(handle == (uv_handle_t*)&server);
   ASSERT(status == 0);
 }
 
@@ -164,7 +164,7 @@ static int echo_start(int port) {
 }
 
 
-static uv_buf_t echo_alloc(uv_handle_t* handle, size_t suggested_size) {
+static uv_buf_t echo_alloc(uv_tcp_t* handle, size_t suggested_size) {
   uv_buf_t buf;
   buf.base = (char*) malloc(suggested_size);
   buf.len = suggested_size;
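Putting the echo-server pieces together, a condensed sketch of the typed accept path; the uv_tcp_init, uv_bind, uv_listen, and uv_accept signatures follow this diff (see uv-unix.c below), while the backlog value and the *_sketch names are placeholders (stdlib.h and uv.h assumed):

static uv_tcp_t server_sketch;

static void peer_close_sketch(uv_handle_t* peer, int status) {
  free(peer);
}

static void accept_sketch(uv_tcp_t* s) {
  /* uv_accept pairs the pending connection with a caller-owned uv_tcp_t. */
  uv_tcp_t* peer = (uv_tcp_t*) malloc(sizeof *peer);
  if (uv_accept(s, peer, peer_close_sketch, NULL)) {
    free(peer);  /* nothing was pending after all */
    return;
  }
  uv_read_start(peer, after_read);  /* after_read as defined above */
}

static int listen_sketch(int port) {
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", port);
  uv_tcp_init(&server_sketch, NULL, NULL);
  uv_bind(&server_sketch, (struct sockaddr*) &addr);
  return uv_listen(&server_sketch, 128, accept_sketch);
}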
diff --git a/test/test-async.c b/test/test-async.c
index 7bedcf4a..1095a9ab 100644
--- a/test/test-async.c
+++ b/test/test-async.c
@@ -25,9 +25,9 @@
 #include
 
 
-static uv_handle_t prepare_handle;
+static uv_prepare_t prepare_handle;
 
-static uv_handle_t async1_handle;
+static uv_async_t async1_handle;
 /* static uv_handle_t async2_handle; */
 
 static int prepare_cb_called = 0;
@@ -120,7 +120,7 @@ static void close_cb(uv_handle_t* handle, int status) {
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* handle, size_t size) {
   uv_buf_t buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
@@ -128,7 +128,7 @@ static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
 
 static void async1_cb(uv_handle_t* handle, int status) {
-  ASSERT(handle == &async1_handle);
+  ASSERT(handle == (uv_handle_t*)&async1_handle);
   ASSERT(status == 0);
 
   async1_cb_called++;
@@ -159,7 +159,7 @@ static void async2_cb(uv_handle_t* handle, int status) {
 
 static void prepare_cb(uv_handle_t* handle, int status) {
   int r;
 
-  ASSERT(handle == &prepare_handle);
+  ASSERT(handle == (uv_handle_t*)&prepare_handle);
   ASSERT(status == 0);
 
   switch (prepare_cb_called) {
diff --git a/test/test-bind-error.c b/test/test-bind-error.c
index ca9ccc58..102dea30 100644
--- a/test/test-bind-error.c
+++ b/test/test-bind-error.c
@@ -36,7 +36,7 @@ static void close_cb(uv_handle_t* handle, int status) {
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* handle, size_t size) {
   uv_buf_t buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
@@ -45,7 +45,7 @@
 
 TEST_IMPL(bind_error_addrinuse) {
   struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
-  uv_handle_t server1, server2;
+  uv_tcp_t server1, server2;
   int r;
 
   uv_init(alloc_cb);
@@ -67,8 +67,8 @@
 
   ASSERT(uv_last_error().code == UV_EADDRINUSE);
 
-  uv_close(&server1);
-  uv_close(&server2);
+  uv_close((uv_handle_t*)&server1);
+  uv_close((uv_handle_t*)&server2);
 
   uv_run();
 
@@ -80,7 +80,7 @@
 
 TEST_IMPL(bind_error_addrnotavail_1) {
   struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
-  uv_handle_t server;
+  uv_tcp_t server;
   int r;
 
   uv_init(alloc_cb);
@@ -94,7 +94,7 @@
     ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
   }
 
-  uv_close(&server);
+  uv_close((uv_handle_t*)&server);
 
   uv_run();
 
@@ -106,7 +106,7 @@
 
 TEST_IMPL(bind_error_addrnotavail_2) {
   struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
-  uv_handle_t server;
+  uv_tcp_t server;
   int r;
 
   uv_init(alloc_cb);
@@ -117,7 +117,7 @@
   ASSERT(r == -1);
   ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
 
-  uv_close(&server);
+  uv_close((uv_handle_t*)&server);
 
   uv_run();
 
@@ -129,7 +129,7 @@
 
 TEST_IMPL(bind_error_fault) {
   char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
-  uv_handle_t server;
+  uv_tcp_t server;
   int r;
 
   uv_init(alloc_cb);
@@ -141,7 +141,7 @@
 
   ASSERT(uv_last_error().code == UV_EFAULT);
 
-  uv_close(&server);
+  uv_close((uv_handle_t*)&server);
 
   uv_run();
 
@@ -155,7 +155,7 @@
 TEST_IMPL(bind_error_inval) {
   struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
   struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
-  uv_handle_t server;
+  uv_tcp_t server;
   int r;
 
   uv_init(alloc_cb);
@@ -169,7 +169,7 @@
 
   ASSERT(uv_last_error().code == UV_EINVAL);
 
-  uv_close(&server);
+  uv_close((uv_handle_t*)&server);
 
   uv_run();
diff --git a/test/test-callback-stack.c b/test/test-callback-stack.c
index d136c9a7..f04ed9f0 100644
--- a/test/test-callback-stack.c
+++ b/test/test-callback-stack.c
@@ -30,7 +30,8 @@
 static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";
 
-static uv_handle_t client, timer;
+static uv_tcp_t client;
+static uv_timer_t timer;
 static uv_req_t connect_req, write_req, shutdown_req;
 
 static int nested = 0;
@@ -58,7 +59,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
 }
 
 
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
   ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
 
   printf("Read. nread == %d\n", nread);
@@ -72,7 +73,7 @@
     ASSERT(uv_last_error().code == UV_EOF);
 
     nested++;
-    if (uv_close(handle)) {
+    if (uv_close((uv_handle_t*)tcp)) {
       FATAL("uv_close failed");
     }
     nested--;
@@ -88,7 +89,7 @@
   /* from a fresh stack. */
   if (bytes_received == sizeof MESSAGE) {
     nested++;
-    uv_req_init(&shutdown_req, handle, shutdown_cb);
+    uv_req_init(&shutdown_req, (uv_handle_t*)tcp, shutdown_cb);
 
     puts("Shutdown");
 
@@ -103,7 +104,7 @@
 static void timer_cb(uv_handle_t* handle, int status) {
   int r;
 
-  ASSERT(handle == &timer);
+  ASSERT(handle == (uv_handle_t*)&timer);
   ASSERT(status == 0);
 
   ASSERT(nested == 0 && "timer_cb must be called from a fresh stack");
@@ -170,7 +171,7 @@
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf;
   buf.len = size;
   buf.base = (char*) malloc(size);
@@ -191,7 +192,7 @@
   puts("Connecting...");
 
   nested++;
-  uv_req_init(&connect_req, &client, connect_cb);
+  uv_req_init(&connect_req, (uv_handle_t*)&client, connect_cb);
   if (uv_connect(&connect_req, (struct sockaddr*) &addr)) {
     FATAL("uv_connect failed");
   }
diff --git a/test/test-connection-fail.c b/test/test-connection-fail.c
index e28c7c36..46457257 100644
--- a/test/test-connection-fail.c
+++ b/test/test-connection-fail.c
@@ -26,7 +26,7 @@
 #include
 
 
-static uv_handle_t handle;
+static uv_tcp_t tcp;
 static uv_req_t req;
 static int connect_cb_calls;
 static int close_cb_calls;
@@ -46,7 +46,7 @@ static void on_connect(uv_req_t *req, int status) {
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
@@ -65,14 +65,14 @@
   server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
 
   /* Try to connec to the server and do NUM_PINGS ping-pongs. */
-  r = uv_tcp_init(&handle, on_close, NULL);
+  r = uv_tcp_init(&tcp, on_close, NULL);
   ASSERT(!r);
 
   /* We are never doing multiple reads/connects at a time anyway. */
   /* so these handles can be pre-initialized. */
-  uv_req_init(&req, &handle, on_connect);
+  uv_req_init(&req, (uv_handle_t*)&tcp, on_connect);
 
-  uv_bind(&handle, (struct sockaddr*)&client_addr);
+  uv_bind(&tcp, (struct sockaddr*)&client_addr);
   r = uv_connect(&req, (struct sockaddr*)&server_addr);
   ASSERT(!r);
diff --git a/test/test-delayed-accept.c b/test/test-delayed-accept.c
index e372b955..374a633a 100644
--- a/test/test-delayed-accept.c
+++ b/test/test-delayed-accept.c
@@ -44,27 +44,27 @@ static void close_cb(uv_handle_t* handle, int status) {
 
 static void do_accept(uv_handle_t* timer_handle, int status) {
-  uv_handle_t* server;
-  uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle);
+  uv_tcp_t* server;
+  uv_tcp_t* accepted_handle = (uv_tcp_t*)malloc(sizeof *accepted_handle);
   int r;
 
   ASSERT(timer_handle != NULL);
   ASSERT(status == 0);
   ASSERT(accepted_handle != NULL);
 
-  server = (uv_handle_t*)timer_handle->data;
+  server = (uv_tcp_t*)timer_handle->data;
   r = uv_accept(server, accepted_handle, close_cb, NULL);
   ASSERT(r == 0);
 
   do_accept_called++;
 
   /* Immediately close the accepted handle. */
-  r = uv_close(accepted_handle);
+  r = uv_close((uv_handle_t*)accepted_handle);
   ASSERT(r == 0);
 
   /* After accepting the two clients close the server handle */
   if (do_accept_called == 2) {
-    r = uv_close(server);
+    r = uv_close((uv_handle_t*)server);
     ASSERT(r == 0);
   }
@@ -74,15 +74,15 @@ static void do_accept(uv_handle_t* timer_handle, int status) {
 }
 
 
-static void accept_cb(uv_handle_t* handle) {
+static void accept_cb(uv_tcp_t* tcp) {
   int r;
-  uv_handle_t* timer_handle;
+  uv_timer_t* timer_handle;
 
-  timer_handle = (uv_handle_t*)malloc(sizeof *timer_handle);
+  timer_handle = (uv_timer_t*)malloc(sizeof *timer_handle);
   ASSERT(timer_handle != NULL);
 
   /* Accept the client after 1 second */
-  r = uv_timer_init(timer_handle, close_cb, (void*)handle);
+  r = uv_timer_init(timer_handle, close_cb, (void*)tcp);
   ASSERT(r == 0);
   r = uv_timer_start(timer_handle, do_accept, 1000, 0);
   ASSERT(r == 0);
@@ -93,7 +93,7 @@
 
 static void start_server() {
   struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
-  uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);
+  uv_tcp_t* server = (uv_tcp_t*)malloc(sizeof *server);
   int r;
 
   ASSERT(server != NULL);
@@ -109,9 +109,9 @@
 }
 
 
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
   /* The server will not send anything, it should close gracefully. */
-  ASSERT(handle != NULL);
+  ASSERT(tcp != NULL);
   ASSERT(nread == -1);
   ASSERT(uv_last_error().code == UV_EOF);
@@ -119,7 +119,7 @@
     free(buf.base);
   }
 
-  uv_close(handle);
+  uv_close((uv_handle_t*)tcp);
 }
 
 
@@ -131,7 +131,7 @@ static void connect_cb(uv_req_t* req, int status) {
 
   /* Not that the server will send anything, but otherwise we'll never know */
   /* when te server closes the connection. */
-  r = uv_read_start(req->handle, read_cb);
+  r = uv_read_start((uv_tcp_t*)(req->handle), read_cb);
   ASSERT(r == 0);
 
   connect_cb_called++;
@@ -142,7 +142,7 @@
 
 static void client_connect() {
   struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
-  uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+  uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
   uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
   int r;
 
@@ -152,13 +152,13 @@
   r = uv_tcp_init(client, close_cb, NULL);
   ASSERT(r == 0);
 
-  uv_req_init(connect_req, client, connect_cb);
+  uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
   r = uv_connect(connect_req, (struct sockaddr*)&addr);
   ASSERT(r == 0);
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf;
   buf.base = (char*)malloc(size);
   buf.len = size;
diff --git a/test/test-loop-handles.c b/test/test-loop-handles.c
index 77055b43..694e7c9c 100644
--- a/test/test-loop-handles.c
+++ b/test/test-loop-handles.c
@@ -75,15 +75,15 @@
 #define TIMEOUT 100
 
 
-static uv_handle_t prepare_1_handle;
-static uv_handle_t prepare_2_handle;
+static uv_prepare_t prepare_1_handle;
+static uv_prepare_t prepare_2_handle;
 
-static uv_handle_t check_handle;
+static uv_check_t check_handle;
 
-static uv_handle_t idle_1_handles[IDLE_COUNT];
-static uv_handle_t idle_2_handle;
+static uv_idle_t idle_1_handles[IDLE_COUNT];
+static uv_idle_t idle_2_handle;
 
-static uv_handle_t timer_handle;
+static uv_timer_t timer_handle;
 
 
 static int loop_iteration = 0;
@@ -110,7 +110,7 @@ static int timer_cb_called = 0;
 
 
 static void timer_cb(uv_handle_t* handle, int status) {
-  ASSERT(handle == &timer_handle);
+  ASSERT(handle == (uv_handle_t*)&timer_handle);
   ASSERT(status == 0);
 
   timer_cb_called++;
@@ -127,7 +127,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
 
   LOG("IDLE_2_CB\n");
 
-  ASSERT(handle == &idle_2_handle);
+  ASSERT(handle == (uv_handle_t*)&idle_2_handle);
   ASSERT(status == 0);
 
   idle_2_cb_called++;
@@ -140,7 +140,7 @@
 static void idle_2_close_cb(uv_handle_t* handle, int status){
   LOG("IDLE_2_CLOSE_CB\n");
 
-  ASSERT(handle == &idle_2_handle);
+  ASSERT(handle == (uv_handle_t*)&idle_2_handle);
   ASSERT(status == 0);
 
   ASSERT(idle_2_is_active);
@@ -173,7 +173,7 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
   idle_1_cb_called++;
 
   if (idle_1_cb_called % 5 == 0) {
-    r = uv_idle_stop(handle);
+    r = uv_idle_stop((uv_idle_t*)handle);
     ASSERT(r == 0);
     idles_1_active--;
   }
@@ -195,7 +195,7 @@ static void check_cb(uv_handle_t* handle, int status) {
 
   LOG("CHECK_CB\n");
 
-  ASSERT(handle == &check_handle);
+  ASSERT(handle == (uv_handle_t*)&check_handle);
   ASSERT(status == 0);
 
   /* XXX
@@ -213,22 +213,22 @@
   } else {
     /* End of the test - close all handles */
-    r = uv_close(&prepare_1_handle);
+    r = uv_close((uv_handle_t*)&prepare_1_handle);
     ASSERT(r == 0);
-    r = uv_close(&check_handle);
+    r = uv_close((uv_handle_t*)&check_handle);
     ASSERT(r == 0);
-    r = uv_close(&prepare_2_handle);
+    r = uv_close((uv_handle_t*)&prepare_2_handle);
     ASSERT(r == 0);
 
     for (i = 0; i < IDLE_COUNT; i++) {
-      r = uv_close(&idle_1_handles[i]);
+      r = uv_close((uv_handle_t*)&idle_1_handles[i]);
       ASSERT(r == 0);
     }
 
     /* This handle is closed/recreated every time, close it only if it is */
     /* active.*/
     if (idle_2_is_active) {
-      r = uv_close(&idle_2_handle);
+      r = uv_close((uv_handle_t*)&idle_2_handle);
       ASSERT(r == 0);
     }
   }
@@ -239,7 +239,7 @@
 static void check_close_cb(uv_handle_t* handle, int status){
   LOG("CHECK_CLOSE_CB\n");
 
-  ASSERT(handle == &check_handle);
+  ASSERT(handle == (uv_handle_t*)&check_handle);
   ASSERT(status == 0);
 
   check_close_cb_called++;
@@ -251,7 +251,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
 
   LOG("PREPARE_2_CB\n");
 
-  ASSERT(handle == &prepare_2_handle);
+  ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
   ASSERT(status == 0);
 
   /* XXX ASSERT(idles_1_active == 0); */
@@ -263,7 +263,7 @@
     /* (loop_iteration % 2 == 0) cannot be true. */
     ASSERT(loop_iteration % 2 != 0);
 
-    r = uv_prepare_stop(handle);
+    r = uv_prepare_stop((uv_prepare_t*)handle);
     ASSERT(r == 0);
 
     prepare_2_cb_called++;
@@ -272,7 +272,7 @@
 static void prepare_2_close_cb(uv_handle_t* handle, int status) {
   LOG("PREPARE_2_CLOSE_CB\n");
 
-  ASSERT(handle == &prepare_2_handle);
+  ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
   ASSERT(status == 0);
 
   prepare_2_close_cb_called++;
@@ -284,7 +284,7 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
 
   LOG("PREPARE_1_CB\n");
 
-  ASSERT(handle == &prepare_1_handle);
+  ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
   ASSERT(status == 0);
 
   /* XXX
@@ -306,14 +306,14 @@
 static void prepare_1_close_cb(uv_handle_t* handle, int status){
   LOG("PREPARE_1_CLOSE_CB");
 
-  ASSERT(handle == &prepare_1_handle);
+  ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
   ASSERT(status == 0);
 
   prepare_1_close_cb_called++;
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t rv = { 0, 0 };
   FATAL("alloc_cb should never be called in this test");
   return rv;
@@ -394,43 +394,43 @@ TEST_IMPL(ref) {
 
 
 TEST_IMPL(idle_ref) {
-  uv_handle_t h;
+  uv_idle_t h;
   uv_init(alloc_cb);
   uv_idle_init(&h, NULL, NULL);
   uv_idle_start(&h, NULL);
-  uv_unref(&h);
+  uv_unref();
   uv_run();
   return 0;
 }
 
 
 TEST_IMPL(async_ref) {
-  uv_handle_t h;
+  uv_async_t h;
   uv_init(alloc_cb);
   uv_async_init(&h, NULL, NULL, NULL);
-  uv_unref(&h);
+  uv_unref();
   uv_run();
   return 0;
 }
 
 
 TEST_IMPL(prepare_ref) {
-  uv_handle_t h;
+  uv_prepare_t h;
   uv_init(alloc_cb);
   uv_prepare_init(&h, NULL, NULL);
   uv_prepare_start(&h, NULL);
-  uv_unref(&h);
+  uv_unref();
   uv_run();
   return 0;
 }
 
 
 TEST_IMPL(check_ref) {
-  uv_handle_t h;
+  uv_check_t h;
   uv_init(alloc_cb);
   uv_check_init(&h, NULL, NULL);
   uv_check_start(&h, NULL);
-  uv_unref(&h);
+  uv_unref();
   uv_run();
   return 0;
 }
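The four *_ref tests above also capture a second API change riding along in this patch: uv_unref() no longer takes a handle argument, so it adjusts a loop-wide reference count rather than a per-handle one, and a single active-but-unreferenced watcher lets uv_run() return. A sketch of that idiom, assuming the API exactly as the tests exercise it:

/* Sketch; mirrors the idle_ref test above. */
static uv_buf_t noalloc_sketch(uv_tcp_t* tcp, size_t size) {
  uv_buf_t buf = {0, 0};
  return buf;  /* no reads ever happen in this sketch */
}

int idle_unref_sketch(void) {
  uv_idle_t h;
  uv_init(noalloc_sketch);
  uv_idle_init(&h, NULL, NULL);
  uv_idle_start(&h, NULL);
  uv_unref();  /* loop-wide: uv_run() may exit despite the active handle */
  uv_run();
  return 0;
}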
diff --git a/test/test-ping-pong.c b/test/test-ping-pong.c
index 13d114e3..3bd7d978 100644
--- a/test/test-ping-pong.c
+++ b/test/test-ping-pong.c
@@ -39,7 +39,7 @@ static char PING[] = "PING\n";
 typedef struct {
   int pongs;
   int state;
-  uv_handle_t handle;
+  uv_tcp_t tcp;
   uv_req_t connect_req;
   uv_req_t read_req;
   char read_buffer[BUFSIZE];
@@ -75,7 +75,7 @@ static void pinger_write_ping(pinger_t* pinger) {
   buf.len = strlen(PING);
 
   req = (uv_req_t*)malloc(sizeof(*req));
-  uv_req_init(req, &pinger->handle, pinger_after_write);
+  uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_after_write);
 
   if (uv_write(req, &buf, 1)) {
     FATAL("uv_write failed");
@@ -85,11 +85,11 @@ static void pinger_write_ping(pinger_t* pinger) {
 }
 
 
-static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
+static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
   unsigned int i;
   pinger_t* pinger;
 
-  pinger = (pinger_t*)handle->data;
+  pinger = (pinger_t*)tcp->data;
 
   if (nread < 0) {
     ASSERT(uv_last_error().code == UV_EOF);
@@ -100,7 +100,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
       free(buf.base);
     }
 
-    uv_close(&pinger->handle);
+    uv_close((uv_handle_t*)(&pinger->tcp));
 
     return;
   }
@@ -115,7 +115,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
     if (pinger->pongs < NUM_PINGS) {
       pinger_write_ping(pinger);
     } else {
-      uv_close(&pinger->handle);
+      uv_close((uv_handle_t*)(&pinger->tcp));
       return;
     }
   }
@@ -130,7 +130,7 @@ static void pinger_on_connect(uv_req_t *req, int status) {
 
   pinger_write_ping(pinger);
 
-  uv_read_start(req->handle, pinger_read_cb);
+  uv_read_start((uv_tcp_t*)(req->handle), pinger_read_cb);
 }
 
 
@@ -144,19 +144,20 @@ static void pinger_new() {
   pinger->pongs = 0;
 
   /* Try to connec to the server and do NUM_PINGS ping-pongs. */
-  r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
+  r = uv_tcp_init(&pinger->tcp, pinger_on_close, (void*)pinger);
   ASSERT(!r);
 
   /* We are never doing multiple reads/connects at a time anyway. */
   /* so these handles can be pre-initialized. */
-  uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
+  uv_req_init(&pinger->connect_req, (uv_handle_t*)(&pinger->tcp),
+      pinger_on_connect);
 
   r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
   ASSERT(!r);
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf;
   buf.base = (char*)malloc(size);
   buf.len = size;
diff --git a/test/test-tcp-writealot.c b/test/test-tcp-writealot.c
index 15cc2d5b..ff88bc14 100644
--- a/test/test-tcp-writealot.c
+++ b/test/test-tcp-writealot.c
@@ -59,8 +59,10 @@ static void shutdown_cb(uv_req_t* req, int status) {
   ASSERT(req);
   ASSERT(status == 0);
 
+  uv_tcp_t* tcp = (uv_tcp_t*)(req->handle);
+
   /* The write buffer should be empty by now. */
-  ASSERT(req->handle->write_queue_size == 0);
+  ASSERT(tcp->write_queue_size == 0);
 
   /* Now we wait for the EOF */
   shutdown_cb_called++;
@@ -72,8 +74,8 @@ static void shutdown_cb(uv_req_t* req, int status) {
 }
 
 
-static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
-  ASSERT(handle != NULL);
+static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
+  ASSERT(tcp != NULL);
 
   if (nread < 0) {
     ASSERT(uv_last_error().code == UV_EOF);
@@ -83,7 +85,7 @@ static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
       free(buf.base);
     }
 
-    uv_close(handle);
+    uv_close((uv_handle_t*)tcp);
     return;
   }
 
@@ -111,13 +113,13 @@ static void write_cb(uv_req_t* req, int status) {
 
 static void connect_cb(uv_req_t* req, int status) {
   uv_buf_t send_bufs[CHUNKS_PER_WRITE];
-  uv_handle_t* handle;
+  uv_tcp_t* tcp;
   int i, j, r;
 
   ASSERT(req != NULL);
   ASSERT(status == 0);
 
-  handle = req->handle;
+  tcp = (uv_tcp_t*)req->handle;
 
   connect_cb_called++;
   free(req);
@@ -133,7 +135,7 @@ static void connect_cb(uv_req_t* req, int status) {
     req = (uv_req_t*)malloc(sizeof *req);
     ASSERT(req != NULL);
 
-    uv_req_init(req, handle, write_cb);
+    uv_req_init(req, (uv_handle_t*)tcp, write_cb);
     r = uv_write(req, (uv_buf_t*)&send_bufs, CHUNKS_PER_WRITE);
     ASSERT(r == 0);
   }
@@ -141,7 +143,7 @@ static void connect_cb(uv_req_t* req, int status) {
   /* Shutdown on drain. FIXME: dealloc req? */
   req = (uv_req_t*) malloc(sizeof(uv_req_t));
   ASSERT(req != NULL);
-  uv_req_init(req, handle, shutdown_cb);
+  uv_req_init(req, (uv_handle_t*)tcp, shutdown_cb);
   r = uv_shutdown(req);
   ASSERT(r == 0);
 
@@ -149,13 +151,13 @@ static void connect_cb(uv_req_t* req, int status) {
   req = (uv_req_t*)malloc(sizeof *req);
   ASSERT(req != NULL);
 
-  uv_req_init(req, handle, read_cb);
-  r = uv_read_start(handle, read_cb);
+  uv_req_init(req, (uv_handle_t*)tcp, read_cb);
+  r = uv_read_start(tcp, read_cb);
   ASSERT(r == 0);
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf;
   buf.base = (char*)malloc(size);
   buf.len = size;
@@ -165,7 +167,7 @@
 
 TEST_IMPL(tcp_writealot) {
   struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
-  uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+  uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
   uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
   int r;
 
@@ -181,7 +183,7 @@
   r = uv_tcp_init(client, close_cb, NULL);
   ASSERT(r == 0);
 
-  uv_req_init(connect_req, client, connect_cb);
+  uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
   r = uv_connect(connect_req, (struct sockaddr*)&addr);
   ASSERT(r == 0);
diff --git a/test/test-timer-again.c b/test/test-timer-again.c
index 1585c09d..19e57cb2 100644
--- a/test/test-timer-again.c
+++ b/test/test-timer-again.c
@@ -29,7 +29,7 @@
 static int repeat_2_cb_called = 0;
 static int repeat_2_cb_allowed = 0;
 
-static uv_handle_t dummy, repeat_1, repeat_2;
+static uv_timer_t dummy, repeat_1, repeat_2;
 
 static int64_t start_time;
@@ -45,10 +45,10 @@ static void close_cb(uv_handle_t* handle, int status) {
 
 static void repeat_1_cb(uv_handle_t* handle, int status) {
   int r;
 
-  ASSERT(handle == &repeat_1);
+  ASSERT(handle == (uv_handle_t*)&repeat_1);
   ASSERT(status == 0);
 
-  ASSERT(uv_timer_get_repeat(handle) == 50);
+  ASSERT(uv_timer_get_repeat((uv_timer_t*)handle) == 50);
 
   LOGF("repeat_1_cb called after %ld ms\n", (long int)(uv_now() - start_time));
@@ -68,7 +68,7 @@
 
 static void repeat_2_cb(uv_handle_t* handle, int status) {
-  ASSERT(handle == &repeat_2);
+  ASSERT(handle == (uv_handle_t*) &repeat_2);
   ASSERT(status == 0);
 
   ASSERT(repeat_2_cb_allowed);
@@ -76,21 +76,22 @@ static void repeat_2_cb(uv_handle_t* handle, int status) {
 
   repeat_2_cb_called++;
 
-  if (uv_timer_get_repeat(handle) == 0) {
+  if (uv_timer_get_repeat(&repeat_2) == 0) {
     ASSERT(!uv_is_active(handle));
     uv_close(handle);
     return;
   }
 
-  LOGF("uv_timer_get_repeat %ld ms\n", (long int)uv_timer_get_repeat(handle));
-  ASSERT(uv_timer_get_repeat(handle) == 100);
+  LOGF("uv_timer_get_repeat %ld ms\n",
+      (long int)uv_timer_get_repeat(&repeat_2));
+  ASSERT(uv_timer_get_repeat(&repeat_2) == 100);
 
   /* This shouldn't take effect immediately. */
   uv_timer_set_repeat(&repeat_2, 0);
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
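As the test above notes, uv_timer_set_repeat() does not take effect immediately: the timer fires once more on its current interval and only then sees the new repeat value. A sketch of that lifecycle, assuming the timer API as used above (the *_sketch names are hypothetical):

static uv_timer_t tick_sketch;

static void tick_cb_sketch(uv_handle_t* handle, int status) {
  /* Clearing the repeat value does not cancel the interval in flight;
   * the timer fires one more time and only then stays stopped. */
  if (uv_timer_get_repeat(&tick_sketch) == 100) {
    uv_timer_set_repeat(&tick_sketch, 0);
  }
}

void arm_tick_sketch(void) {
  uv_timer_init(&tick_sketch, NULL, NULL);
  /* 50 ms initial timeout, then every 100 ms until repeat is cleared. */
  uv_timer_start(&tick_sketch, tick_cb_sketch, 50, 100);
}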
diff --git a/test/test-timer.c b/test/test-timer.c
index 15c80def..d68b49c4 100644
--- a/test/test-timer.c
+++ b/test/test-timer.c
@@ -92,7 +92,7 @@ static void never_cb(uv_handle_t* handle, int status) {
 }
 
 
-static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
+static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
   uv_buf_t buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
@@ -100,8 +100,8 @@
 
 TEST_IMPL(timer) {
-  uv_handle_t *once;
-  uv_handle_t repeat, never;
+  uv_timer_t *once;
+  uv_timer_t repeat, never;
   int i, r;
 
   uv_init(alloc_cb);
@@ -111,7 +111,7 @@
 
   /* Let 10 timers time out in 500 ms total. */
   for (i = 0; i < 10; i++) {
-    once = (uv_handle_t*)malloc(sizeof(*once));
+    once = (uv_timer_t*)malloc(sizeof(*once));
     ASSERT(once != NULL);
     r = uv_timer_init(once, once_close_cb, NULL);
     ASSERT(r == 0);
diff --git a/uv-unix.c b/uv-unix.c
index 0b032436..89caa84f 100644
--- a/uv-unix.c
+++ b/uv-unix.c
@@ -47,8 +47,8 @@ static uv_alloc_cb alloc_cb;
 
 void uv__tcp_io(EV_P_ ev_io* watcher, int revents);
 void uv__next(EV_P_ ev_idle* watcher, int revents);
-static void uv__tcp_connect(uv_handle_t* handle);
-int uv_tcp_open(uv_handle_t*, int fd);
+static void uv__tcp_connect(uv_tcp_t*);
+int uv_tcp_open(uv_tcp_t*, int fd);
 static void uv__finish_close(uv_handle_t* handle);
 
 
@@ -134,34 +134,41 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) {
 
 int uv_close(uv_handle_t* handle) {
+  uv_tcp_t* tcp;
+  uv_async_t* async;
+  uv_timer_t* timer;
+
   switch (handle->type) {
     case UV_TCP:
-      ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
-      ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+      tcp = (uv_tcp_t*) handle;
+      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
+      ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
       break;
 
     case UV_PREPARE:
-      uv_prepare_stop(handle);
+      uv_prepare_stop((uv_prepare_t*) handle);
       break;
 
    case UV_CHECK:
-      uv_check_stop(handle);
+      uv_check_stop((uv_check_t*) handle);
      break;
 
    case UV_IDLE:
-      uv_idle_stop(handle);
+      uv_idle_stop((uv_idle_t*) handle);
      break;
 
    case UV_ASYNC:
-      ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
+      async = (uv_async_t*)handle;
+      ev_async_stop(EV_DEFAULT_ &async->async_watcher);
      ev_ref(EV_DEFAULT_UC);
      break;
 
    case UV_TIMER:
-      if (ev_is_active(&handle->timer_watcher)) {
+      timer = (uv_timer_t*)handle;
+      if (ev_is_active(&timer->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
-      ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
+      ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
      break;
 
    default:
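uv_close() can downcast from uv_handle_t* like this only because every concrete handle struct begins with the same fields as uv_handle_t, so casts in both directions are well defined and handle->type can always be read through the base pointer. The headers are not part of this diff, so the layout below is an illustrative assumption, not the actual uv.h definition:

/* Assumed struct-prefix layout (uv.h is not shown in this patch);
 * the *_sketch names are hypothetical. */
typedef struct {
  uv_handle_type type;     /* shared prefix: always safe to read */
  uv_close_cb close_cb;
  void* data;
} base_sketch_t;

typedef struct {
  base_sketch_t base;      /* must come first for the casts to hold */
  ev_timer timer_watcher;  /* type-specific payload follows */
} timer_sketch_t;

void close_dispatch_sketch(base_sketch_t* handle) {
  if (handle->type == UV_TIMER) {
    timer_sketch_t* timer = (timer_sketch_t*) handle;  /* prefix cast */
    ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
  }
}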
@@ -214,49 +221,48 @@ static void uv__handle_init(uv_handle_t* handle, uv_handle_type type,
 }
 
 
-int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
-    void* data) {
-  uv__handle_init(handle, UV_TCP, close_cb, data);
+int uv_tcp_init(uv_tcp_t* tcp, uv_close_cb close_cb, void* data) {
+  uv__handle_init((uv_handle_t*)tcp, UV_TCP, close_cb, data);
 
-  handle->connect_req = NULL;
-  handle->accepted_fd = -1;
-  handle->fd = -1;
-  handle->delayed_error = 0;
-  ngx_queue_init(&handle->write_queue);
-  handle->write_queue_size = 0;
+  tcp->connect_req = NULL;
+  tcp->accepted_fd = -1;
+  tcp->fd = -1;
+  tcp->delayed_error = 0;
+  ngx_queue_init(&tcp->write_queue);
+  tcp->write_queue_size = 0;
 
-  ev_init(&handle->read_watcher, uv__tcp_io);
-  handle->read_watcher.data = handle;
+  ev_init(&tcp->read_watcher, uv__tcp_io);
+  tcp->read_watcher.data = tcp;
 
-  ev_init(&handle->write_watcher, uv__tcp_io);
-  handle->write_watcher.data = handle;
+  ev_init(&tcp->write_watcher, uv__tcp_io);
+  tcp->write_watcher.data = tcp;
 
-  assert(ngx_queue_empty(&handle->write_queue));
-  assert(handle->write_queue_size == 0);
+  assert(ngx_queue_empty(&tcp->write_queue));
+  assert(tcp->write_queue_size == 0);
 
   return 0;
 }
 
 
-int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
+int uv_bind(uv_tcp_t* tcp, struct sockaddr* addr) {
   int addrsize;
   int domain;
   int r;
 
-  if (handle->fd <= 0) {
+  if (tcp->fd <= 0) {
     int fd = socket(AF_INET, SOCK_STREAM, 0);
 
     if (fd < 0) {
-      uv_err_new(handle, errno);
+      uv_err_new((uv_handle_t*)tcp, errno);
       return -1;
     }
 
-    if (uv_tcp_open(handle, fd)) {
+    if (uv_tcp_open(tcp, fd)) {
       close(fd);
       return -2;
     }
   }
 
-  assert(handle->fd >= 0);
+  assert(tcp->fd >= 0);
 
   if (addr->sa_family == AF_INET) {
     addrsize = sizeof(struct sockaddr_in);
@@ -265,21 +271,21 @@ int uv_bind(uv_tcp_t* tcp, struct sockaddr* addr) {
     addrsize = sizeof(struct sockaddr_in6);
     domain = AF_INET6;
   } else {
-    uv_err_new(handle, EFAULT);
+    uv_err_new((uv_handle_t*)tcp, EFAULT);
     return -1;
   }
 
-  r = bind(handle->fd, addr, addrsize);
-  handle->delayed_error = 0;
+  r = bind(tcp->fd, addr, addrsize);
+  tcp->delayed_error = 0;
 
   if (r) {
     switch (errno) {
       case EADDRINUSE:
-        handle->delayed_error = errno;
+        tcp->delayed_error = errno;
         return 0;
 
       default:
-        uv_err_new(handle, errno);
+        uv_err_new((uv_handle_t*)tcp, errno);
         return -1;
     }
   }
@@ -288,9 +294,9 @@ int uv_bind(uv_tcp_t* tcp, struct sockaddr* addr) {
 }
 
 
-int uv_tcp_open(uv_handle_t* handle, int fd) {
+int uv_tcp_open(uv_tcp_t* tcp, int fd) {
   assert(fd >= 0);
-  handle->fd = fd;
+  tcp->fd = fd;
 
   /* Set non-blocking. */
   int yes = 1;
@@ -302,30 +308,30 @@ int uv_tcp_open(uv_tcp_t* tcp, int fd) {
   assert(r == 0);
 
   /* Associate the fd with each ev_io watcher. */
-  ev_io_set(&handle->read_watcher, fd, EV_READ);
-  ev_io_set(&handle->write_watcher, fd, EV_WRITE);
+  ev_io_set(&tcp->read_watcher, fd, EV_READ);
+  ev_io_set(&tcp->write_watcher, fd, EV_WRITE);
 
   /* These should have been set up by uv_tcp_init. */
-  assert(handle->next_watcher.data == handle);
-  assert(handle->write_watcher.data == handle);
-  assert(handle->read_watcher.data == handle);
-  assert(handle->read_watcher.cb == uv__tcp_io);
-  assert(handle->write_watcher.cb == uv__tcp_io);
+  assert(tcp->next_watcher.data == tcp);
+  assert(tcp->write_watcher.data == tcp);
+  assert(tcp->read_watcher.data == tcp);
+  assert(tcp->read_watcher.cb == uv__tcp_io);
+  assert(tcp->write_watcher.cb == uv__tcp_io);
 
   return 0;
 }
 
 
 void uv__server_io(EV_P_ ev_io* watcher, int revents) {
-  uv_handle_t* handle = watcher->data;
-  assert(watcher == &handle->read_watcher ||
-      watcher == &handle->write_watcher);
+  uv_tcp_t* tcp = watcher->data;
+  assert(watcher == &tcp->read_watcher ||
+      watcher == &tcp->write_watcher);
   assert(revents == EV_READ);
-  assert(!uv_flag_is_set(handle, UV_CLOSING));
+  assert(!uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING));
 
-  if (handle->accepted_fd >= 0) {
-    ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+  if (tcp->accepted_fd >= 0) {
+    ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
     return;
   }
 
@@ -333,8 +339,8 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
     struct sockaddr addr = { 0 };
     socklen_t addrlen = 0;
 
-    assert(handle->accepted_fd < 0);
-    int fd = accept(handle->fd, &addr, &addrlen);
+    assert(tcp->accepted_fd < 0);
+    int fd = accept(tcp->fd, &addr, &addrlen);
 
     if (fd < 0) {
       if (errno == EAGAIN) {
@@ -344,16 +350,16 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
         /* TODO special trick. unlock reserved socket, accept, close. */
         return;
       } else {
-        uv_err_new(handle, errno);
-        uv_close(handle);
+        uv_err_new((uv_handle_t*)tcp, errno);
+        uv_close((uv_handle_t*)tcp);
       }
 
     } else {
-      handle->accepted_fd = fd;
-      handle->accept_cb(handle);
-      if (handle->accepted_fd >= 0) {
+      tcp->accepted_fd = fd;
+      tcp->accept_cb(tcp);
+      if (tcp->accepted_fd >= 0) {
         /* The user hasn't yet accepted called uv_accept() */
-        ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+        ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
         return;
       }
     }
@@ -361,7 +367,7 @@
 }
 
 
-int uv_accept(uv_handle_t* server, uv_handle_t* client,
+int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
     uv_close_cb close_cb, void* data) {
   if (server->accepted_fd < 0) {
     return -1;
   }
@@ -384,32 +390,34 @@
 }
 
 
-int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
-  assert(handle->fd >= 0);
+int uv_listen(uv_tcp_t* tcp, int backlog, uv_accept_cb cb) {
+  assert(tcp->fd >= 0);
 
-  if (handle->delayed_error) {
-    uv_err_new(handle, handle->delayed_error);
+  if (tcp->delayed_error) {
+    uv_err_new((uv_handle_t*)tcp, tcp->delayed_error);
     return -1;
   }
 
-  int r = listen(handle->fd, backlog);
+  int r = listen(tcp->fd, backlog);
   if (r < 0) {
-    uv_err_new(handle, errno);
+    uv_err_new((uv_handle_t*)tcp, errno);
     return -1;
   }
 
-  handle->accept_cb = cb;
+  tcp->accept_cb = cb;
 
   /* Start listening for connections. */
-  ev_io_set(&handle->read_watcher, handle->fd, EV_READ);
-  ev_set_cb(&handle->read_watcher, uv__server_io);
-  ev_io_start(EV_DEFAULT_ &handle->read_watcher);
+  ev_io_set(&tcp->read_watcher, tcp->fd, EV_READ);
+  ev_set_cb(&tcp->read_watcher, uv__server_io);
+  ev_io_start(EV_DEFAULT_ &tcp->read_watcher);
 
   return 0;
 }
 
 
 void uv__finish_close(uv_handle_t* handle) {
+  uv_tcp_t* tcp;
+
   assert(uv_flag_is_set(handle, UV_CLOSING));
   assert(!uv_flag_is_set(handle, UV_CLOSED));
   uv_flag_set(handle, UV_CLOSED);
@@ -419,39 +427,40 @@ void uv__finish_close(uv_handle_t* handle) {
       /* XXX Is it necessary to stop these watchers here? weren't they
        * supposed to be stopped in uv_close()?
       */
-      ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
-      ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
+      tcp = (uv_tcp_t*)handle;
+      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
+      ev_io_stop(EV_DEFAULT_ &tcp->read_watcher);
 
-      assert(!ev_is_active(&handle->read_watcher));
-      assert(!ev_is_active(&handle->write_watcher));
+      assert(!ev_is_active(&tcp->read_watcher));
+      assert(!ev_is_active(&tcp->write_watcher));
 
-      close(handle->fd);
-      handle->fd = -1;
+      close(tcp->fd);
+      tcp->fd = -1;
 
-      if (handle->accepted_fd >= 0) {
-        close(handle->accepted_fd);
-        handle->accepted_fd = -1;
+      if (tcp->accepted_fd >= 0) {
+        close(tcp->accepted_fd);
+        tcp->accepted_fd = -1;
       }
       break;
 
     case UV_PREPARE:
-      assert(!ev_is_active(&handle->prepare_watcher));
+      assert(!ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher));
       break;
 
    case UV_CHECK:
-      assert(!ev_is_active(&handle->check_watcher));
+      assert(!ev_is_active(&((uv_check_t*)handle)->check_watcher));
      break;
 
    case UV_IDLE:
-      assert(!ev_is_active(&handle->idle_watcher));
+      assert(!ev_is_active(&((uv_idle_t*)handle)->idle_watcher));
      break;
 
    case UV_ASYNC:
-      assert(!ev_is_active(&handle->async_watcher));
+      assert(!ev_is_active(&((uv_async_t*)handle)->async_watcher));
      break;
 
    case UV_TIMER:
-      assert(!ev_is_active(&handle->timer_watcher));
+      assert(!ev_is_active(&((uv_timer_t*)handle)->timer_watcher));
      break;
 
    default:
@@ -469,12 +478,12 @@ void uv__finish_close(uv_handle_t* handle) {
 }
 
 
-uv_req_t* uv_write_queue_head(uv_handle_t* handle) {
-  if (ngx_queue_empty(&handle->write_queue)) {
+uv_req_t* uv_write_queue_head(uv_tcp_t* tcp) {
+  if (ngx_queue_empty(&tcp->write_queue)) {
     return NULL;
   }
 
-  ngx_queue_t* q = ngx_queue_head(&handle->write_queue);
+  ngx_queue_t* q = ngx_queue_head(&tcp->write_queue);
   if (!q) {
     return NULL;
   }
@@ -499,49 +508,49 @@ void uv__next(EV_P_ ev_idle* watcher, int revents) {
 }
 
 
-static void uv__drain(uv_handle_t* handle) {
-  assert(!uv_write_queue_head(handle));
-  assert(handle->write_queue_size == 0);
+static void uv__drain(uv_tcp_t* tcp) {
+  assert(!uv_write_queue_head(tcp));
+  assert(tcp->write_queue_size == 0);
 
-  ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
+  ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
 
   /* Shutdown? */
-  if (uv_flag_is_set(handle, UV_SHUTTING) &&
-      !uv_flag_is_set(handle, UV_CLOSING) &&
-      !uv_flag_is_set(handle, UV_SHUT)) {
-    assert(handle->shutdown_req);
+  if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUTTING) &&
+      !uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING) &&
+      !uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT)) {
+    assert(tcp->shutdown_req);
 
-    uv_req_t* req = handle->shutdown_req;
+    uv_req_t* req = tcp->shutdown_req;
     uv_shutdown_cb cb = req->cb;
 
-    if (shutdown(handle->fd, SHUT_WR)) {
-      /* Error. Nothing we can do, close the handle. */
-      uv_err_new(handle, errno);
-      uv_close(handle);
+    if (shutdown(tcp->fd, SHUT_WR)) {
+      /* Error. Nothing we can do, close the tcp handle. */
+      uv_err_new((uv_handle_t*)tcp, errno);
+      uv_close((uv_handle_t*)tcp);
       if (cb) cb(req, -1);
     } else {
-      uv_err_new(handle, 0);
-      uv_flag_set(handle, UV_SHUT);
+      uv_err_new((uv_handle_t*)tcp, 0);
+      uv_flag_set((uv_handle_t*)tcp, UV_SHUT);
       if (cb) cb(req, 0);
     }
  }
}


-void uv__write(uv_handle_t* handle) {
-  assert(handle->fd >= 0);
+void uv__write(uv_tcp_t* tcp) {
+  assert(tcp->fd >= 0);
 
   /* TODO: should probably while(1) here until EAGAIN */
 
   /* Get the request at the head of the queue. */
-  uv_req_t* req = uv_write_queue_head(handle);
+  uv_req_t* req = uv_write_queue_head(tcp);
   if (!req) {
-    assert(handle->write_queue_size == 0);
-    uv__drain(handle);
+    assert(tcp->write_queue_size == 0);
+    uv__drain(tcp);
     return;
   }
 
-  assert(req->handle == handle);
+  assert(req->handle == (uv_handle_t*)tcp);
 
   /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
    * because Windows's WSABUF is not an iovec.
@@ -554,16 +563,16 @@ void uv__write(uv_tcp_t* tcp) {
    * inside the iov each time we write. So there is no need to offset it.
    */
 
-  ssize_t n = writev(handle->fd, iov, iovcnt);
+  ssize_t n = writev(tcp->fd, iov, iovcnt);
 
   uv_write_cb cb = req->cb;
 
   if (n < 0) {
     if (errno != EAGAIN) {
-      uv_err_t err = uv_err_new(handle, errno);
+      uv_err_t err = uv_err_new((uv_handle_t*)tcp, errno);
 
       /* XXX How do we handle the error? Need test coverage here. */
-      uv_close(handle);
+      uv_close((uv_handle_t*)tcp);
 
       if (cb) {
         cb(req, -1);
@@ -583,7 +592,7 @@ void uv__write(uv_tcp_t* tcp) {
       if (n < len) {
         buf->base += n;
         buf->len -= n;
-        handle->write_queue_size -= n;
+        tcp->write_queue_size -= n;
         n = 0;
 
         /* There is more to write. Break and ensure the watcher is pending. */
@@ -596,14 +605,14 @@ void uv__write(uv_tcp_t* tcp) {
         assert(n >= len);
         n -= len;
 
-        assert(handle->write_queue_size >= len);
-        handle->write_queue_size -= len;
+        assert(tcp->write_queue_size >= len);
+        tcp->write_queue_size -= len;
 
         if (req->write_index == req->bufcnt) {
           /* Then we're done! */
           assert(n == 0);
 
-          /* Pop the req off handle->write_queue. */
+          /* Pop the req off tcp->write_queue. */
           ngx_queue_remove(&req->queue);
           free(req->bufs); /* FIXME: we should not be allocing for each read */
           req->bufs = NULL;
@@ -613,11 +622,11 @@ void uv__write(uv_tcp_t* tcp) {
             cb(req, 0);
           }
 
-          if (!ngx_queue_empty(&handle->write_queue)) {
-            assert(handle->write_queue_size > 0);
+          if (!ngx_queue_empty(&tcp->write_queue)) {
+            assert(tcp->write_queue_size > 0);
          } else {
            /* Write queue drained. */
-            uv__drain(handle);
+            uv__drain(tcp);
          }
 
          return;
@@ -630,100 +639,101 @@ void uv__write(uv_tcp_t* tcp) {
   assert(n == 0 || n == -1);
 
   /* We're not done yet. */
-  assert(ev_is_active(&handle->write_watcher));
-  ev_io_start(EV_DEFAULT_ &handle->write_watcher);
+  assert(ev_is_active(&tcp->write_watcher));
+  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
 }
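uv__write() above handles short writes by advancing buf->base and buf->len in place and shrinking write_queue_size by exactly the bytes consumed, so the next writev() resumes where the kernel stopped. The same cursor arithmetic in isolation, independent of the uv types (function name hypothetical):

#include <stddef.h>
#include <sys/uio.h>

/* Advance an iovec array past n already-written bytes; returns the index
 * of the first buffer that still has data left. Mirrors the bookkeeping
 * in uv__write() above. */
size_t iov_advance_sketch(struct iovec* iov, size_t iovcnt, size_t n) {
  size_t i = 0;
  while (i < iovcnt && n >= iov[i].iov_len) {
    n -= iov[i].iov_len;                            /* fully written */
    i++;
  }
  if (i < iovcnt && n > 0) {
    iov[i].iov_base = (char*) iov[i].iov_base + n;  /* partial write */
    iov[i].iov_len -= n;
  }
  return i;
}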
-void uv__read(uv_handle_t* handle) {
+void uv__read(uv_tcp_t* tcp) {
   /* XXX: Maybe instead of having UV_READING we just test if
-   * handle->read_cb is NULL or not?
+   * tcp->read_cb is NULL or not?
    */
-  while (handle->read_cb && uv_flag_is_set(handle, UV_READING)) {
+  while (tcp->read_cb && uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
     assert(alloc_cb);
-    uv_buf_t buf = alloc_cb(handle, 64 * 1024);
+    uv_buf_t buf = alloc_cb(tcp, 64 * 1024);
 
     assert(buf.len > 0);
     assert(buf.base);
 
     struct iovec* iov = (struct iovec*) &buf;
 
-    ssize_t nread = readv(handle->fd, iov, 1);
+    ssize_t nread = readv(tcp->fd, iov, 1);
 
     if (nread < 0) {
       /* Error */
       if (errno == EAGAIN) {
         /* Wait for the next one. */
-        if (uv_flag_is_set(handle, UV_READING)) {
-          ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
+        if (uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
+          ev_io_start(EV_DEFAULT_UC_ &tcp->read_watcher);
         }
-        uv_err_new(handle, EAGAIN);
-        handle->read_cb(handle, 0, buf);
+        uv_err_new((uv_handle_t*)tcp, EAGAIN);
+        tcp->read_cb(tcp, 0, buf);
         return;
       } else {
-        uv_err_new(handle, errno);
-        uv_close(handle);
-        handle->read_cb(handle, -1, buf);
-        assert(!ev_is_active(&handle->read_watcher));
+        uv_err_new((uv_handle_t*)tcp, errno);
+        uv_close((uv_handle_t*)tcp);
+        tcp->read_cb(tcp, -1, buf);
+        assert(!ev_is_active(&tcp->read_watcher));
         return;
       }
 
     } else if (nread == 0) {
       /* EOF */
-      uv_err_new_artificial(handle, UV_EOF);
-      ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
-      handle->read_cb(handle, -1, buf);
+      uv_err_new_artificial((uv_handle_t*)tcp, UV_EOF);
+      ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher);
+      tcp->read_cb(tcp, -1, buf);
 
-      if (uv_flag_is_set(handle, UV_SHUT)) {
-        uv_close(handle);
+      if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT)) {
+        uv_close((uv_handle_t*)tcp);
       }
       return;
 
     } else {
       /* Successful read */
-      handle->read_cb(handle, nread, buf);
+      tcp->read_cb(tcp, nread, buf);
     }
   }
 }
 
 
 int uv_shutdown(uv_req_t* req) {
-  uv_handle_t* handle = req->handle;
-  assert(handle->fd >= 0);
+  uv_tcp_t* tcp = (uv_tcp_t*)req->handle;
+  assert(tcp->fd >= 0);
+  assert(tcp->type == UV_TCP);
 
-  if (uv_flag_is_set(handle, UV_SHUT) ||
-      uv_flag_is_set(handle, UV_CLOSED) ||
-      uv_flag_is_set(handle, UV_CLOSING)) {
+  if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT) ||
+      uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSED) ||
+      uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING)) {
     return -1;
   }
 
-  handle->shutdown_req = req;
+  tcp->shutdown_req = req;
   req->type = UV_SHUTDOWN;
 
-  uv_flag_set(handle, UV_SHUTTING);
+  uv_flag_set((uv_handle_t*)tcp, UV_SHUTTING);
 
-  ev_io_start(EV_DEFAULT_UC_ &handle->write_watcher);
+  ev_io_start(EV_DEFAULT_UC_ &tcp->write_watcher);
 
   return 0;
 }
 
 
 void uv__tcp_io(EV_P_ ev_io* watcher, int revents) {
-  uv_handle_t* handle = watcher->data;
-  assert(watcher == &handle->read_watcher ||
-      watcher == &handle->write_watcher);
+  uv_tcp_t* tcp = watcher->data;
+  assert(watcher == &tcp->read_watcher ||
+      watcher == &tcp->write_watcher);
 
-  assert(handle->fd >= 0);
+  assert(tcp->fd >= 0);
 
-  assert(!uv_flag_is_set(handle, UV_CLOSING));
+  assert(!uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING));
 
-  if (handle->connect_req) {
-    uv__tcp_connect(handle);
+  if (tcp->connect_req) {
+    uv__tcp_connect(tcp);
   } else {
     if (revents & EV_READ) {
-      uv__read(handle);
+      uv__read(tcp);
     }
 
     if (revents & EV_WRITE) {
-      uv__write(handle);
+      uv__write(tcp);
     }
   }
 }
@@ -734,32 +744,32 @@ void uv__tcp_io(EV_P_ ev_io* watcher, int revents) {
  * In order to determine if we've errored out or succeeded must call
  * getsockopt.
  */
-static void uv__tcp_connect(uv_handle_t* handle) {
+static void uv__tcp_connect(uv_tcp_t* tcp) {
   int error;
   socklen_t errorsize = sizeof(int);
 
-  assert(handle->fd >= 0);
+  assert(tcp->fd >= 0);
 
-  uv_req_t* req = handle->connect_req;
+  uv_req_t* req = tcp->connect_req;
   assert(req);
 
-  if (handle->delayed_error) {
+  if (tcp->delayed_error) {
     /* To smooth over the differences between unixes errors that
      * were reported synchronously on the first connect can be delayed
      * until the next tick--which is now.
      */
-    error = handle->delayed_error;
-    handle->delayed_error = 0;
+    error = tcp->delayed_error;
+    tcp->delayed_error = 0;
   } else {
     /* Normal situation: we need to get the socket error from the kernel. */
-    getsockopt(handle->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
+    getsockopt(tcp->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
   }
 
   if (!error) {
-    ev_io_start(EV_DEFAULT_ &handle->read_watcher);
+    ev_io_start(EV_DEFAULT_ &tcp->read_watcher);
 
     /* Successful connection */
-    handle->connect_req = NULL;
+    tcp->connect_req = NULL;
     uv_connect_cb connect_cb = req->cb;
     if (connect_cb) {
       connect_cb(req, 0);
@@ -770,31 +780,31 @@ static void uv__tcp_connect(uv_tcp_t* tcp) {
     return;
   } else {
     /* Error */
-    uv_err_t err = uv_err_new(handle, error);
+    uv_err_t err = uv_err_new((uv_handle_t*)tcp, error);
 
-    handle->connect_req = NULL;
+    tcp->connect_req = NULL;
 
     uv_connect_cb connect_cb = req->cb;
     if (connect_cb) {
       connect_cb(req, -1);
     }
 
-    uv_close(handle);
+    uv_close((uv_handle_t*)tcp);
   }
 }
 
 
 int uv_connect(uv_req_t* req, struct sockaddr* addr) {
-  uv_handle_t* handle = req->handle;
+  uv_tcp_t* tcp = (uv_tcp_t*)req->handle;
 
-  if (handle->fd <= 0) {
+  if (tcp->fd <= 0) {
     int fd = socket(AF_INET, SOCK_STREAM, 0);
 
     if (fd < 0) {
-      uv_err_new(handle, errno);
+      uv_err_new((uv_handle_t*)tcp, errno);
       return -1;
     }
 
-    if (uv_tcp_open(handle, fd)) {
+    if (uv_tcp_open(tcp, fd)) {
       close(fd);
       return -2;
     }
@@ -803,22 +813,22 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) {
   req->type = UV_CONNECT;
   ngx_queue_init(&req->queue);
 
-  if (handle->connect_req) {
-    uv_err_new(handle, EALREADY);
+  if (tcp->connect_req) {
+    uv_err_new((uv_handle_t*)tcp, EALREADY);
     return -1;
   }
 
-  if (handle->type != UV_TCP) {
-    uv_err_new(handle, ENOTSOCK);
+  if (tcp->type != UV_TCP) {
+    uv_err_new((uv_handle_t*)tcp, ENOTSOCK);
     return -1;
   }
 
-  handle->connect_req = req;
+  tcp->connect_req = req;
 
   int addrsize = sizeof(struct sockaddr_in);
 
-  int r = connect(handle->fd, addr, addrsize);
-  handle->delayed_error = 0;
+  int r = connect(tcp->fd, addr, addrsize);
+  tcp->delayed_error = 0;
 
   if (r != 0 && errno != EINPROGRESS) {
     switch (errno) {
       /* If we get a ECONNREFUSED wait
@@ -827,20 +837,20 @@
        * wait.
        */
       case ECONNREFUSED:
-        handle->delayed_error = errno;
+        tcp->delayed_error = errno;
         break;
 
       default:
-        uv_err_new(handle, errno);
+        uv_err_new((uv_handle_t*)tcp, errno);
         return -1;
     }
   }
 
-  assert(handle->write_watcher.data == handle);
-  ev_io_start(EV_DEFAULT_ &handle->write_watcher);
+  assert(tcp->write_watcher.data == tcp);
+  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
 
-  if (handle->delayed_error) {
-    ev_feed_event(EV_DEFAULT_ &handle->write_watcher, EV_WRITE);
+  if (tcp->delayed_error) {
+    ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
   }
 
   return 0;
@@ -863,8 +873,8 @@ static size_t uv__buf_count(uv_buf_t bufs[], int bufcnt) {
  * This is not required for the uv_buf_t array.
  */
*/ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) { - uv_handle_t* handle = req->handle; - assert(handle->fd >= 0); + uv_tcp_t* tcp = (uv_tcp_t*)req->handle; + assert(tcp->fd >= 0); ngx_queue_init(&req->queue); req->type = UV_WRITE; @@ -875,17 +885,17 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) { req->bufcnt = bufcnt; req->write_index = 0; - handle->write_queue_size += uv__buf_count(bufs, bufcnt); + tcp->write_queue_size += uv__buf_count(bufs, bufcnt); /* Append the request to write_queue. */ - ngx_queue_insert_tail(&handle->write_queue, &req->queue); + ngx_queue_insert_tail(&tcp->write_queue, &req->queue); - assert(!ngx_queue_empty(&handle->write_queue)); - assert(handle->write_watcher.cb == uv__tcp_io); - assert(handle->write_watcher.data == handle); - assert(handle->write_watcher.fd == handle->fd); + assert(!ngx_queue_empty(&tcp->write_queue)); + assert(tcp->write_watcher.cb == uv__tcp_io); + assert(tcp->write_watcher.data == tcp); + assert(tcp->write_watcher.fd == tcp->fd); - ev_io_start(EV_DEFAULT_ &handle->write_watcher); + ev_io_start(EV_DEFAULT_ &tcp->write_watcher); return 0; } @@ -911,44 +921,37 @@ int64_t uv_now() { } -int uv_read_start(uv_handle_t* handle, uv_read_cb cb) { - /* The UV_READING flag is irrelevant of the state of the handle - it just +int uv_read_start(uv_tcp_t* tcp, uv_read_cb cb) { + /* The UV_READING flag is irrelevant of the state of the tcp - it just * expresses the desired state of the user. */ - uv_flag_set(handle, UV_READING); + uv_flag_set((uv_handle_t*)tcp, UV_READING); /* TODO: try to do the read inline? */ - /* TODO: keep track of handle state. If we've gotten a EOF then we should + /* TODO: keep track of tcp state. If we've gotten a EOF then we should * not start the IO watcher. */ - assert(handle->fd >= 0); - handle->read_cb = cb; + assert(tcp->fd >= 0); + tcp->read_cb = cb; /* These should have been set by uv_tcp_init. */ - assert(handle->read_watcher.data == handle); - assert(handle->read_watcher.cb == uv__tcp_io); + assert(tcp->read_watcher.data == tcp); + assert(tcp->read_watcher.cb == uv__tcp_io); - ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher); + ev_io_start(EV_DEFAULT_UC_ &tcp->read_watcher); return 0; } -int uv_read_stop(uv_handle_t* handle) { - uv_flag_unset(handle, UV_READING); +int uv_read_stop(uv_tcp_t* tcp) { + uv_flag_unset((uv_handle_t*)tcp, UV_READING); - ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher); - handle->read_cb = NULL; + ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher); + tcp->read_cb = NULL; return 0; } -void uv_free(uv_handle_t* handle) { - free(handle); - /* lists? 
*/ - return; -} - - void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) { req->type = UV_UNKNOWN_REQ; req->cb = cb; @@ -958,30 +961,32 @@ void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) { static void uv__prepare(EV_P_ ev_prepare* w, int revents) { - uv_handle_t* handle = (uv_handle_t*)(w->data); + uv_prepare_t* prepare = w->data; - if (handle->prepare_cb) handle->prepare_cb(handle, 0); + if (prepare->prepare_cb) { + prepare->prepare_cb((uv_handle_t*)prepare, 0); + } } -int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { - uv__handle_init(handle, UV_PREPARE, close_cb, data); +int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data) { + uv__handle_init((uv_handle_t*)prepare, UV_PREPARE, close_cb, data); - ev_prepare_init(&handle->prepare_watcher, uv__prepare); - handle->prepare_watcher.data = handle; + ev_prepare_init(&prepare->prepare_watcher, uv__prepare); + prepare->prepare_watcher.data = prepare; - handle->prepare_cb = NULL; + prepare->prepare_cb = NULL; return 0; } -int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb) { - int was_active = ev_is_active(&handle->prepare_watcher); +int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb) { + int was_active = ev_is_active(&prepare->prepare_watcher); - handle->prepare_cb = cb; + prepare->prepare_cb = cb; - ev_prepare_start(EV_DEFAULT_UC_ &handle->prepare_watcher); + ev_prepare_start(EV_DEFAULT_UC_ &prepare->prepare_watcher); if (!was_active) { ev_unref(EV_DEFAULT_UC); @@ -991,10 +996,10 @@ int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb) { } -int uv_prepare_stop(uv_handle_t* handle) { - int was_active = ev_is_active(&handle->prepare_watcher); +int uv_prepare_stop(uv_prepare_t* prepare) { + int was_active = ev_is_active(&prepare->prepare_watcher); - ev_prepare_stop(EV_DEFAULT_UC_ &handle->prepare_watcher); + ev_prepare_stop(EV_DEFAULT_UC_ &prepare->prepare_watcher); if (was_active) { ev_ref(EV_DEFAULT_UC); @@ -1005,30 +1010,32 @@ int uv_prepare_stop(uv_handle_t* handle) { static void uv__check(EV_P_ ev_check* w, int revents) { - uv_handle_t* handle = (uv_handle_t*)(w->data); + uv_check_t* check = w->data; - if (handle->check_cb) handle->check_cb(handle, 0); + if (check->check_cb) { + check->check_cb((uv_handle_t*)check, 0); + } } -int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { - uv__handle_init(handle, UV_CHECK, close_cb, data); +int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data) { + uv__handle_init((uv_handle_t*)check, UV_CHECK, close_cb, data); - ev_check_init(&handle->check_watcher, uv__check); - handle->check_watcher.data = handle; + ev_check_init(&check->check_watcher, uv__check); + check->check_watcher.data = check; - handle->check_cb = NULL; + check->check_cb = NULL; return 0; } -int uv_check_start(uv_handle_t* handle, uv_loop_cb cb) { - int was_active = ev_is_active(&handle->check_watcher); +int uv_check_start(uv_check_t* check, uv_loop_cb cb) { + int was_active = ev_is_active(&check->check_watcher); - handle->check_cb = cb; + check->check_cb = cb; - ev_check_start(EV_DEFAULT_UC_ &handle->check_watcher); + ev_check_start(EV_DEFAULT_UC_ &check->check_watcher); if (!was_active) { ev_unref(EV_DEFAULT_UC); @@ -1038,10 +1045,10 @@ int uv_check_start(uv_handle_t* handle, uv_loop_cb cb) { } -int uv_check_stop(uv_handle_t* handle) { - int was_active = ev_is_active(&handle->check_watcher); +int uv_check_stop(uv_check_t* check) { + int was_active = ev_is_active(&check->check_watcher); - 
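Each start/stop pair above brackets its ev_ref/ev_unref with a was_active test; the point is that repeated starts must not drop a second loop reference. A toy model of the invariant, with plain ints standing in for libev's refcount and watcher state:

static int loop_refs = 1;  /* stand-in for the ev loop's refcount */
static int active;         /* stand-in for ev_is_active(&watcher) */

static void handle_start(void) {
  int was_active = active;
  active = 1;              /* ev_prepare_start(...) */
  if (!was_active)
    loop_refs--;           /* the handle alone won't keep uv_run alive */
}

static void handle_stop(void) {
  int was_active = active;
  active = 0;              /* ev_prepare_stop(...) */
  if (was_active)
    loop_refs++;           /* return the reference exactly once */
}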
ev_check_stop(EV_DEFAULT_UC_ &handle->check_watcher); + ev_check_stop(EV_DEFAULT_UC_ &check->check_watcher); if (was_active) { ev_ref(EV_DEFAULT_UC); @@ -1052,30 +1059,32 @@ int uv_check_stop(uv_handle_t* handle) { static void uv__idle(EV_P_ ev_idle* w, int revents) { - uv_handle_t* handle = (uv_handle_t*)(w->data); + uv_idle_t* idle = (uv_idle_t*)(w->data); - if (handle->idle_cb) handle->idle_cb(handle, 0); + if (idle->idle_cb) { + idle->idle_cb((uv_handle_t*)idle, 0); + } } -int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { - uv__handle_init(handle, UV_IDLE, close_cb, data); +int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data) { + uv__handle_init((uv_handle_t*)idle, UV_IDLE, close_cb, data); - ev_idle_init(&handle->idle_watcher, uv__idle); - handle->idle_watcher.data = handle; + ev_idle_init(&idle->idle_watcher, uv__idle); + idle->idle_watcher.data = idle; - handle->idle_cb = NULL; + idle->idle_cb = NULL; return 0; } -int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) { - int was_active = ev_is_active(&handle->idle_watcher); +int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb) { + int was_active = ev_is_active(&idle->idle_watcher); - handle->idle_cb = cb; - ev_idle_start(EV_DEFAULT_UC_ &handle->idle_watcher); + idle->idle_cb = cb; + ev_idle_start(EV_DEFAULT_UC_ &idle->idle_watcher); if (!was_active) { ev_unref(EV_DEFAULT_UC); @@ -1085,10 +1094,10 @@ int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) { } -int uv_idle_stop(uv_handle_t* handle) { - int was_active = ev_is_active(&handle->idle_watcher); +int uv_idle_stop(uv_idle_t* idle) { + int was_active = ev_is_active(&idle->idle_watcher); - ev_idle_stop(EV_DEFAULT_UC_ &handle->idle_watcher); + ev_idle_stop(EV_DEFAULT_UC_ &idle->idle_watcher); if (was_active) { ev_ref(EV_DEFAULT_UC); @@ -1101,16 +1110,16 @@ int uv_idle_stop(uv_handle_t* handle) { int uv_is_active(uv_handle_t* handle) { switch (handle->type) { case UV_TIMER: - return ev_is_active(&handle->timer_watcher); + return ev_is_active(&((uv_timer_t*)handle)->timer_watcher); case UV_PREPARE: - return ev_is_active(&handle->prepare_watcher); + return ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher); case UV_CHECK: - return ev_is_active(&handle->check_watcher); + return ev_is_active(&((uv_check_t*)handle)->check_watcher); case UV_IDLE: - return ev_is_active(&handle->idle_watcher); + return ev_is_active(&((uv_idle_t*)handle)->idle_watcher); default: return 1; @@ -1119,99 +1128,104 @@ int uv_is_active(uv_handle_t* handle) { static void uv__async(EV_P_ ev_async* w, int revents) { - uv_handle_t* handle = (uv_handle_t*)(w->data); + uv_async_t* async = w->data; - if (handle->async_cb) handle->async_cb(handle, 0); + if (async->async_cb) { + async->async_cb((uv_handle_t*)async, 0); + } } -int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, +int uv_async_init(uv_async_t* async, uv_async_cb async_cb, uv_close_cb close_cb, void* data) { - uv__handle_init(handle, UV_ASYNC, close_cb, data); + uv__handle_init((uv_handle_t*)async, UV_ASYNC, close_cb, data); - ev_async_init(&handle->async_watcher, uv__async); - handle->async_watcher.data = handle; + ev_async_init(&async->async_watcher, uv__async); + async->async_watcher.data = async; - handle->async_cb = async_cb; + async->async_cb = async_cb; /* Note: This does not have symmetry with the other libev wrappers. 
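The casts in uv_is_active are sound because every concrete handle struct begins with the same base fields, so a uv_handle_t* can be converted back to the subclass once the type tag has been checked. A self-contained model of the idiom (all names illustrative):

#include <assert.h>

typedef enum { MY_TIMER, MY_OTHER } my_type;

typedef struct { my_type type; } base_t;                  /* uv_handle_t */
typedef struct { my_type type; int active; } my_timer_t;  /* "subclass" */

static int is_active(base_t* h) {
  switch (h->type) {
    case MY_TIMER:
      return ((my_timer_t*) h)->active;  /* downcast by shared prefix */
    default:
      return 1;
  }
}

int main(void) {
  my_timer_t t = { MY_TIMER, 0 };
  assert(is_active((base_t*) &t) == 0);
  return 0;
}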
*/ - ev_async_start(EV_DEFAULT_UC_ &handle->async_watcher); + ev_async_start(EV_DEFAULT_UC_ &async->async_watcher); ev_unref(EV_DEFAULT_UC); return 0; } -int uv_async_send(uv_handle_t* handle) { - ev_async_send(EV_DEFAULT_UC_ &handle->async_watcher); +int uv_async_send(uv_async_t* async) { + ev_async_send(EV_DEFAULT_UC_ &async->async_watcher); } static void uv__timer_cb(EV_P_ ev_timer* w, int revents) { - uv_handle_t* handle = (uv_handle_t*)(w->data); + uv_timer_t* timer = w->data; if (!ev_is_active(w)) { ev_ref(EV_DEFAULT_UC); } - if (handle->timer_cb) handle->timer_cb(handle, 0); + if (timer->timer_cb) { + timer->timer_cb((uv_handle_t*)timer, 0); + } } -int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { - uv__handle_init(handle, UV_TIMER, close_cb, data); +int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data) { + uv__handle_init((uv_handle_t*)timer, UV_TIMER, close_cb, data); - ev_init(&handle->timer_watcher, uv__timer_cb); - handle->timer_watcher.data = handle; + ev_init(&timer->timer_watcher, uv__timer_cb); + timer->timer_watcher.data = timer; return 0; } -int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout, +int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat) { - if (ev_is_active(&handle->timer_watcher)) { + if (ev_is_active(&timer->timer_watcher)) { return -1; } - handle->timer_cb = cb; - ev_timer_set(&handle->timer_watcher, timeout / 1000.0, repeat / 1000.0); - ev_timer_start(EV_DEFAULT_UC_ &handle->timer_watcher); + timer->timer_cb = cb; + ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0); + ev_timer_start(EV_DEFAULT_UC_ &timer->timer_watcher); ev_unref(EV_DEFAULT_UC); return 0; } -int uv_timer_stop(uv_handle_t* handle) { - if (ev_is_active(&handle->timer_watcher)) { +int uv_timer_stop(uv_timer_t* timer) { + if (ev_is_active(&timer->timer_watcher)) { ev_ref(EV_DEFAULT_UC); } - ev_timer_stop(EV_DEFAULT_UC_ &handle->timer_watcher); + ev_timer_stop(EV_DEFAULT_UC_ &timer->timer_watcher); return 0; } -int uv_timer_again(uv_handle_t* handle) { - if (!ev_is_active(&handle->timer_watcher)) { - uv_err_new(handle, EINVAL); +int uv_timer_again(uv_timer_t* timer) { + if (!ev_is_active(&timer->timer_watcher)) { + uv_err_new((uv_handle_t*)timer, EINVAL); return -1; } - ev_timer_again(EV_DEFAULT_UC_ &handle->timer_watcher); + ev_timer_again(EV_DEFAULT_UC_ &timer->timer_watcher); return 0; } -void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat) { - assert(handle->type == UV_TIMER); - handle->timer_watcher.repeat = repeat / 1000.0; +void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat) { + assert(timer->type == UV_TIMER); + timer->timer_watcher.repeat = repeat / 1000.0; } -int64_t uv_timer_get_repeat(uv_handle_t* handle) { - assert(handle->type == UV_TIMER); - return (int64_t)(1000 * handle->timer_watcher.repeat); +int64_t uv_timer_get_repeat(uv_timer_t* timer) { + assert(timer->type == UV_TIMER); + return (int64_t)(1000 * timer->timer_watcher.repeat); } + int uv_get_exepath(char* buffer, size_t* size) { if (!buffer || !size) { return -1; @@ -1259,3 +1273,5 @@ int uv_get_exepath(char* buffer, size_t* size) { /* Need to return argv[0] */ #endif } + + diff --git a/uv-unix.h b/uv-unix.h index 6301ac3f..cd52f447 100644 --- a/uv-unix.h +++ b/uv-unix.h @@ -38,7 +38,7 @@ typedef struct { } uv_buf_t; -#define uv_req_private_fields \ +#define UV_REQ_PRIVATE_FIELDS \ int write_index; \ ev_timer timer; \ ngx_queue_t queue; \ @@ -47,11 +47,14 @@ typedef struct { /* TODO: union or 
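The unix timer wrappers keep the public API in integer milliseconds while libev's ev_timer speaks floating-point seconds: uv_timer_start divides by 1000.0 going in, uv_timer_get_repeat multiplies by 1000 coming out. A sketch of that round-trip:

#include <assert.h>
#include <stdint.h>

int main(void) {
  int64_t repeat_ms = 250;
  double ev_repeat = repeat_ms / 1000.0;       /* API ms -> libev sec */
  int64_t back = (int64_t)(1000 * ev_repeat);  /* libev sec -> API ms */

  /* Exact for small values; very large ones can lose precision in the
   * double and truncate on the way back. */
  assert(back == repeat_ms);
  return 0;
}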
classes please! */ -#define uv_handle_private_fields \ +#define UV_HANDLE_PRIVATE_FIELDS \ int fd; \ int flags; \ - ev_idle next_watcher; \ -/* UV_TCP */ \ + ev_idle next_watcher; + + +/* UV_TCP */ +#define UV_TCP_PRIVATE_FIELDS \ int delayed_error; \ uv_read_cb read_cb; \ uv_accept_cb accept_cb; \ @@ -60,20 +63,35 @@ typedef struct { uv_req_t *shutdown_req; \ ev_io read_watcher; \ ev_io write_watcher; \ - ngx_queue_t write_queue; \ + ngx_queue_t write_queue; + + /* UV_PREPARE */ \ +#define UV_PREPARE_PRIVATE_FIELDS \ ev_prepare prepare_watcher; \ - uv_loop_cb prepare_cb; \ -/* UV_CHECK */ \ + uv_loop_cb prepare_cb; + + +/* UV_CHECK */ +#define UV_CHECK_PRIVATE_FIELDS \ ev_check check_watcher; \ - uv_loop_cb check_cb; \ -/* UV_IDLE */ \ + uv_loop_cb check_cb; + + +/* UV_IDLE */ +#define UV_IDLE_PRIVATE_FIELDS \ ev_idle idle_watcher; \ - uv_loop_cb idle_cb; \ -/* UV_ASYNC */ \ + uv_loop_cb idle_cb; + + +/* UV_ASYNC */ +#define UV_ASYNC_PRIVATE_FIELDS \ ev_async async_watcher; \ - uv_loop_cb async_cb; \ -/* UV_TIMER */ \ + uv_loop_cb async_cb; + + +/* UV_TIMER */ +#define UV_TIMER_PRIVATE_FIELDS \ ev_timer timer_watcher; \ uv_loop_cb timer_cb; diff --git a/uv-win.c b/uv-win.c index f84519aa..fd3be778 100644 --- a/uv-win.c +++ b/uv-win.c @@ -141,12 +141,12 @@ static LPFN_TRANSMITFILE pTransmitFile; /* Binary tree used to keep the list of timers sorted. */ -static int uv_timer_compare(uv_handle_t* handle1, uv_handle_t* handle2); -RB_HEAD(uv_timer_s, uv_handle_s); -RB_PROTOTYPE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare); +static int uv_timer_compare(uv_timer_t* handle1, uv_timer_t* handle2); +RB_HEAD(uv_timer_tree_s, uv_timer_s); +RB_PROTOTYPE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare); /* The head of the timers tree */ -static struct uv_timer_s uv_timers_ = RB_INITIALIZER(uv_timers_); +static struct uv_timer_tree_s uv_timers_ = RB_INITIALIZER(uv_timers_); /* Lists of active uv_prepare / uv_check / uv_idle watchers */ @@ -422,7 +422,7 @@ static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) { } -static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb, +static int uv_tcp_init_socket(uv_tcp_t* handle, uv_close_cb close_cb, void* data, SOCKET socket) { DWORD yes = 1; @@ -464,15 +464,14 @@ static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb, } -static void uv_tcp_init_connection(uv_handle_t* handle) { +static void uv_tcp_init_connection(uv_tcp_t* handle) { handle->flags |= UV_HANDLE_CONNECTION; handle->write_reqs_pending = 0; - uv_req_init(&(handle->read_req), handle, NULL); + uv_req_init(&(handle->read_req), (uv_handle_t*)handle, NULL); } -int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, - void* data) { +int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data) { SOCKET sock; sock = socket(AF_INET, SOCK_STREAM, 0); @@ -490,7 +489,7 @@ int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, } -static void uv_tcp_endgame(uv_handle_t* handle) { +static void uv_tcp_endgame(uv_tcp_t* handle) { uv_err_t err; int status; @@ -520,7 +519,7 @@ static void uv_tcp_endgame(uv_handle_t* handle) { !(handle->flags & UV_HANDLE_CLOSING)) { /* Because uv_close will add the handle to the endgame_handles list, */ /* return here and call the close cb the next time. */ - uv_close(handle); + uv_close((uv_handle_t*)handle); return; } @@ -531,7 +530,7 @@ static void uv_tcp_endgame(uv_handle_t* handle) { if (handle->close_cb) { uv_last_error_ = handle->error; - handle->close_cb(handle, handle->error.code == UV_OK ? 
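The uv-unix.h hunk above splits the old monolithic private-fields macro into one UV_*_PRIVATE_FIELDS macro per handle type, which uv.h can then paste into separate structs. A reduced model of the composition (macro and type names here are stand-ins):

#define HANDLE_FIELDS \
  int type;           \
  void* data;

#define TIMER_PRIVATE_FIELDS \
  long long due;             \
  long long repeat;

/* Base class: only the shared fields. */
typedef struct {
  HANDLE_FIELDS
} handle_t;

/* Subclass: shared fields first (so the prefix layout matches the
 * base), then just the fields this handle type needs. */
typedef struct {
  HANDLE_FIELDS
  TIMER_PRIVATE_FIELDS
} timer_handle_t;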
0 : 1); + handle->close_cb((uv_handle_t*)handle, handle->error.code == UV_OK ? 0 : 1); } uv_refs_--; @@ -539,13 +538,13 @@ static void uv_tcp_endgame(uv_handle_t* handle) { } -static void uv_timer_endgame(uv_handle_t* handle) { +static void uv_timer_endgame(uv_timer_t* handle) { if (handle->flags & UV_HANDLE_CLOSING) { assert(!(handle->flags & UV_HANDLE_CLOSED)); handle->flags |= UV_HANDLE_CLOSED; if (handle->close_cb) { - handle->close_cb(handle, 0); + handle->close_cb((uv_handle_t*)handle, 0); } uv_refs_--; @@ -567,14 +566,14 @@ static void uv_loop_endgame(uv_handle_t* handle) { } -static void uv_async_endgame(uv_handle_t* handle) { +static void uv_async_endgame(uv_async_t* handle) { if (handle->flags & UV_HANDLE_CLOSING && !handle->async_sent) { assert(!(handle->flags & UV_HANDLE_CLOSED)); handle->flags |= UV_HANDLE_CLOSED; if (handle->close_cb) { - handle->close_cb(handle, 0); + handle->close_cb((uv_handle_t*)handle, 0); } uv_refs_--; @@ -593,11 +592,11 @@ static void uv_call_endgames() { switch (handle->type) { case UV_TCP: - uv_tcp_endgame(handle); + uv_tcp_endgame((uv_tcp_t*)handle); break; case UV_TIMER: - uv_timer_endgame(handle); + uv_timer_endgame((uv_timer_t*)handle); break; case UV_PREPARE: @@ -607,7 +606,7 @@ static void uv_call_endgames() { break; case UV_ASYNC: - uv_async_endgame(handle); + uv_async_endgame((uv_async_t*)handle); break; default: @@ -629,6 +628,8 @@ static void uv_want_endgame(uv_handle_t* handle) { static int uv_close_error(uv_handle_t* handle, uv_err_t e) { + uv_tcp_t* tcp; + if (handle->flags & UV_HANDLE_CLOSING) { return 0; } @@ -639,34 +640,35 @@ static int uv_close_error(uv_handle_t* handle, uv_err_t e) { /* Handle-specific close actions */ switch (handle->type) { case UV_TCP: - closesocket(handle->socket); - if (handle->reqs_pending == 0) { + tcp = (uv_tcp_t*)handle; + closesocket(tcp->socket); + if (tcp->reqs_pending == 0) { uv_want_endgame(handle); } return 0; case UV_TIMER: - uv_timer_stop(handle); + uv_timer_stop((uv_timer_t*)handle); uv_want_endgame(handle); return 0; case UV_PREPARE: - uv_prepare_stop(handle); + uv_prepare_stop((uv_prepare_t*)handle); uv_want_endgame(handle); return 0; case UV_CHECK: - uv_check_stop(handle); + uv_check_stop((uv_check_t*)handle); uv_want_endgame(handle); return 0; case UV_IDLE: - uv_idle_stop(handle); + uv_idle_stop((uv_idle_t*)handle); uv_want_endgame(handle); return 0; case UV_ASYNC: - if (!handle->async_sent) { + if (!((uv_async_t*)handle)->async_sent) { uv_want_endgame(handle); } return 0; @@ -695,7 +697,7 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) { } -int uv_bind(uv_handle_t* handle, struct sockaddr* addr) { +int uv_bind(uv_tcp_t* handle, struct sockaddr* addr) { int addrsize; DWORD err; @@ -726,7 +728,7 @@ int uv_bind(uv_handle_t* handle, struct sockaddr* addr) { } -static void uv_queue_accept(uv_handle_t* handle) { +static void uv_queue_accept(uv_tcp_t* handle) { uv_req_t* req; BOOL success; DWORD bytes; @@ -737,7 +739,7 @@ static void uv_queue_accept(uv_handle_t* handle) { accept_socket = socket(AF_INET, SOCK_STREAM, 0); if (accept_socket == INVALID_SOCKET) { - uv_close_error(handle, uv_new_sys_error(WSAGetLastError())); + uv_close_error((uv_handle_t*)handle, uv_new_sys_error(WSAGetLastError())); return; } @@ -762,7 +764,7 @@ static void uv_queue_accept(uv_handle_t* handle) { /* destroy the preallocated client handle */ closesocket(accept_socket); /* destroy ourselves */ - uv_close_error(handle, uv_last_error_); + uv_close_error((uv_handle_t*)handle, uv_last_error_); return; } @@ -773,7 +775,7 
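The endgame functions above implement a two-phase close on Windows: uv_close only marks the handle CLOSING, and the close callback runs later, once every outstanding overlapped request has drained. A reduced model of the handshake (flag values and the printf are illustrative):

#include <stdio.h>

enum { H_CLOSING = 1, H_CLOSED = 2 };

typedef struct {
  int flags;
  int reqs_pending;  /* overlapped operations still in flight */
} handle_t;

static void want_endgame(handle_t* h) {
  if ((h->flags & H_CLOSING) &&
      !(h->flags & H_CLOSED) &&
      h->reqs_pending == 0) {
    h->flags |= H_CLOSED;
    printf("close_cb fires here\n");  /* handle->close_cb(handle, 0) */
  }
}

static void close_handle(handle_t* h) {
  h->flags |= H_CLOSING;  /* phase one: no callback yet */
  want_endgame(h);
}

static void req_completed(handle_t* h) {
  h->reqs_pending--;
  want_endgame(h);        /* the last request unblocks phase two */
}

int main(void) {
  handle_t h = { 0, 1 };
  close_handle(&h);   /* nothing yet: one request still pending */
  req_completed(&h);  /* now the close completes */
  return 0;
}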
@@ static void uv_queue_accept(uv_handle_t* handle) { } -static void uv_queue_read(uv_handle_t* handle) { +static void uv_queue_read(uv_tcp_t* handle) { uv_req_t *req; uv_buf_t buf; int result; @@ -799,7 +801,7 @@ static void uv_queue_read(uv_handle_t* handle) { NULL); if (result != 0 && WSAGetLastError() != ERROR_IO_PENDING) { uv_set_sys_error(WSAGetLastError()); - uv_close_error(handle, uv_last_error_); + uv_close_error((uv_handle_t*)handle, uv_last_error_); return; } @@ -808,7 +810,7 @@ static void uv_queue_read(uv_handle_t* handle) { } -int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) { +int uv_listen(uv_tcp_t* handle, int backlog, uv_accept_cb cb) { assert(backlog > 0); if (handle->flags & UV_HANDLE_BIND_ERROR) { @@ -831,14 +833,14 @@ int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) { handle->flags |= UV_HANDLE_LISTENING; handle->accept_cb = cb; - uv_req_init(&(handle->accept_req), handle, NULL); + uv_req_init(&(handle->accept_req), (uv_handle_t*)handle, NULL); uv_queue_accept(handle); return 0; } -int uv_accept(uv_handle_t* server, uv_handle_t* client, +int uv_accept(uv_tcp_t* server, uv_tcp_t* client, uv_close_cb close_cb, void* data) { int rv = 0; @@ -865,7 +867,7 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client, } -int uv_read_start(uv_handle_t* handle, uv_read_cb cb) { +int uv_read_start(uv_tcp_t* handle, uv_read_cb cb) { if (!(handle->flags & UV_HANDLE_CONNECTION)) { uv_set_sys_error(WSAEINVAL); return -1; @@ -893,7 +895,7 @@ int uv_read_start(uv_handle_t* handle, uv_read_cb cb) { } -int uv_read_stop(uv_handle_t* handle) { +int uv_read_stop(uv_tcp_t* handle) { handle->flags &= ~UV_HANDLE_READING; return 0; @@ -904,7 +906,7 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) { int addrsize; BOOL success; DWORD bytes; - uv_handle_t* handle = req->handle; + uv_tcp_t* handle = (uv_tcp_t*)req->handle; assert(!(req->flags & UV_REQ_PENDING)); @@ -965,7 +967,7 @@ static size_t uv_count_bufs(uv_buf_t bufs[], int count) { int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) { int result; DWORD bytes, err; - uv_handle_t* handle = req->handle; + uv_tcp_t* handle = (uv_tcp_t*) req->handle; assert(!(req->flags & UV_REQ_PENDING)); @@ -1016,7 +1018,7 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) { int uv_shutdown(uv_req_t* req) { - uv_handle_t* handle = req->handle; + uv_tcp_t* handle = (uv_tcp_t*) req->handle; int status = 0; if (!(req->handle->flags & UV_HANDLE_CONNECTION)) { @@ -1036,13 +1038,13 @@ int uv_shutdown(uv_req_t* req) { handle->shutdown_req = req; handle->reqs_pending++; - uv_want_endgame(handle); + uv_want_endgame((uv_handle_t*)handle); return 0; } -static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { +static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) { BOOL success; DWORD bytes, flags, err; uv_buf_t buf; @@ -1058,7 +1060,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { handle->write_queue_size -= req->queued_bytes; if (!success) { uv_set_sys_error(GetLastError()); - uv_close_error(handle, uv_last_error_); + uv_close_error((uv_handle_t*)handle, uv_last_error_); } if (req->cb) { ((uv_write_cb)req->cb)(req, success ? 
0 : -1); @@ -1067,7 +1069,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { if (success && handle->write_reqs_pending == 0 && handle->flags & UV_HANDLE_SHUTTING) { - uv_want_endgame(handle); + uv_want_endgame((uv_handle_t*)handle); } break; @@ -1075,7 +1077,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE); if (!success) { uv_set_sys_error(GetLastError()); - uv_close_error(handle, uv_last_error_); + uv_close_error((uv_handle_t*)handle, uv_last_error_); } while (handle->flags & UV_HANDLE_READING) { buf = uv_alloc_(handle, 65536); @@ -1102,7 +1104,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { uv_last_error_.code = UV_EOF; uv_last_error_.sys_errno_ = ERROR_SUCCESS; ((uv_read_cb)handle->read_cb)(handle, -1, buf); - uv_want_endgame(handle); + uv_want_endgame((uv_handle_t*)handle); break; } } else { @@ -1114,7 +1116,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { } else { /* Ouch! serious error. */ uv_set_sys_error(err); - uv_close_error(handle, uv_last_error_); + uv_close_error((uv_handle_t*)handle, uv_last_error_); } break; } @@ -1186,12 +1188,12 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) { /* more pending requests. */ if (handle->flags & UV_HANDLE_CLOSING && handle->reqs_pending == 0) { - uv_want_endgame(handle); + uv_want_endgame((uv_handle_t*)handle); } } -static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) { +static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) { if (a->due < b->due) return -1; if (a->due > b->due) @@ -1204,10 +1206,10 @@ static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) { } -RB_GENERATE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare); +RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare); -int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { +int uv_timer_init(uv_timer_t* handle, uv_close_cb close_cb, void* data) { handle->type = UV_TIMER; handle->close_cb = (void*) close_cb; handle->data = data; @@ -1222,9 +1224,9 @@ int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { } -int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) { +int uv_timer_start(uv_timer_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) { if (handle->flags & UV_HANDLE_ACTIVE) { - RB_REMOVE(uv_timer_s, &uv_timers_, handle); + RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle); } handle->timer_cb = (void*) timer_cb; @@ -1232,7 +1234,7 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in handle->repeat = repeat; handle->flags |= UV_HANDLE_ACTIVE; - if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) { + if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) { uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT"); } @@ -1240,11 +1242,11 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in } -int uv_timer_stop(uv_handle_t* handle) { +int uv_timer_stop(uv_timer_t* handle) { if (!(handle->flags & UV_HANDLE_ACTIVE)) return 0; - RB_REMOVE(uv_timer_s, &uv_timers_, handle); + RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle); handle->flags &= ~UV_HANDLE_ACTIVE; @@ -1252,7 +1254,7 @@ int uv_timer_stop(uv_handle_t* handle) { } -int uv_timer_again(uv_handle_t* handle) { +int uv_timer_again(uv_timer_t* handle) { /* If timer_cb is NULL that means that the timer was never started. 
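uv_timer_compare orders the tree by due time, and RB_INSERT returning non-NULL (a duplicate key) is treated as fatal, so the comparator must impose a strict total order even when two timers share a deadline. The hunk elides the tie-break; comparing node addresses, as below, is one way to do it (an assumption, not the verbatim code):

#include <stdint.h>

struct my_timer {
  int64_t due;
};

static int timer_compare(const struct my_timer* a,
                         const struct my_timer* b) {
  if (a->due < b->due) return -1;
  if (a->due > b->due) return 1;

  /* Equal deadlines: fall back to identity so two distinct timers
   * never compare equal and RB_INSERT never sees a "duplicate". */
  if (a < b) return -1;
  if (a > b) return 1;
  return 0;  /* only when a == b */
}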
*/ if (!handle->timer_cb) { uv_set_sys_error(ERROR_INVALID_DATA); @@ -1260,14 +1262,14 @@ int uv_timer_again(uv_handle_t* handle) { } if (handle->flags & UV_HANDLE_ACTIVE) { - RB_REMOVE(uv_timer_s, &uv_timers_, handle); + RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle); handle->flags &= ~UV_HANDLE_ACTIVE; } if (handle->repeat) { handle->due = uv_now_ + handle->repeat; - if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) { + if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) { uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT"); } @@ -1278,13 +1280,13 @@ int uv_timer_again(uv_handle_t* handle) { } -void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat) { +void uv_timer_set_repeat(uv_timer_t* handle, int64_t repeat) { assert(handle->type == UV_TIMER); handle->repeat = repeat; } -int64_t uv_timer_get_repeat(uv_handle_t* handle) { +int64_t uv_timer_get_repeat(uv_timer_t* handle) { assert(handle->type == UV_TIMER); return handle->repeat; } @@ -1383,57 +1385,57 @@ static void uv_loop_invoke(uv_handle_t* list) { } -int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { +int uv_prepare_init(uv_prepare_t* handle, uv_close_cb close_cb, void* data) { handle->type = UV_PREPARE; - return uv_loop_init(handle, close_cb, data); + return uv_loop_init((uv_handle_t*)handle, close_cb, data); } -int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { +int uv_check_init(uv_check_t* handle, uv_close_cb close_cb, void* data) { handle->type = UV_CHECK; - return uv_loop_init(handle, close_cb, data); + return uv_loop_init((uv_handle_t*)handle, close_cb, data); } -int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) { +int uv_idle_init(uv_idle_t* handle, uv_close_cb close_cb, void* data) { handle->type = UV_IDLE; - return uv_loop_init(handle, close_cb, data); + return uv_loop_init((uv_handle_t*)handle, close_cb, data); } -int uv_prepare_start(uv_handle_t* handle, uv_loop_cb loop_cb) { +int uv_prepare_start(uv_prepare_t* handle, uv_loop_cb loop_cb) { assert(handle->type == UV_PREPARE); - return uv_loop_start(handle, loop_cb, &uv_prepare_handles_); + return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_prepare_handles_); } -int uv_check_start(uv_handle_t* handle, uv_loop_cb loop_cb) { +int uv_check_start(uv_check_t* handle, uv_loop_cb loop_cb) { assert(handle->type == UV_CHECK); - return uv_loop_start(handle, loop_cb, &uv_check_handles_); + return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_check_handles_); } -int uv_idle_start(uv_handle_t* handle, uv_loop_cb loop_cb) { +int uv_idle_start(uv_idle_t* handle, uv_loop_cb loop_cb) { assert(handle->type == UV_IDLE); - return uv_loop_start(handle, loop_cb, &uv_idle_handles_); + return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_idle_handles_); } -int uv_prepare_stop(uv_handle_t* handle) { +int uv_prepare_stop(uv_prepare_t* handle) { assert(handle->type == UV_PREPARE); - return uv_loop_stop(handle, &uv_prepare_handles_); + return uv_loop_stop((uv_handle_t*)handle, &uv_prepare_handles_); } -int uv_check_stop(uv_handle_t* handle) { +int uv_check_stop(uv_check_t* handle) { assert(handle->type == UV_CHECK); - return uv_loop_stop(handle, &uv_check_handles_); + return uv_loop_stop((uv_handle_t*)handle, &uv_check_handles_); } -int uv_idle_stop(uv_handle_t* handle) { +int uv_idle_stop(uv_idle_t* handle) { assert(handle->type == UV_IDLE); - return uv_loop_stop(handle, &uv_idle_handles_); + return uv_loop_stop((uv_handle_t*)handle, &uv_idle_handles_); } @@ -1451,7 +1453,7 @@ int 
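Isolated from the tree plumbing, uv_timer_again's policy is small: a repeating timer gets its deadline pushed one interval past now and is reinserted; a non-repeating one stays stopped. A sketch:

#include <stdint.h>

/* Returns the new due time, or -1 if the timer should stay stopped. */
static int64_t timer_again_due(int64_t now, int64_t repeat) {
  return repeat != 0 ? now + repeat : -1;
}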
uv_is_active(uv_handle_t* handle) { } -int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, +int uv_async_init(uv_async_t* handle, uv_async_cb async_cb, uv_close_cb close_cb, void* data) { uv_req_t* req; @@ -1463,7 +1465,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, handle->error = uv_ok_; req = &handle->async_req; - uv_req_init(req, handle, async_cb); + uv_req_init(req, (uv_handle_t*)handle, async_cb); req->type = UV_WAKEUP; uv_refs_++; @@ -1472,7 +1474,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, } -int uv_async_send(uv_handle_t* handle) { +int uv_async_send(uv_async_t* handle) { if (handle->type != UV_ASYNC) { /* Can't set errno because that's not thread-safe. */ return -1; @@ -1495,16 +1497,16 @@ int uv_async_send(uv_handle_t* handle) { } -static void uv_async_return_req(uv_handle_t* handle, uv_req_t* req) { +static void uv_async_return_req(uv_async_t* handle, uv_req_t* req) { assert(handle->type == UV_ASYNC); assert(req->type == UV_WAKEUP); handle->async_sent = 0; if (req->cb) { - ((uv_async_cb)req->cb)(handle, 0); + ((uv_async_cb)req->cb)((uv_handle_t*)handle, 0); } if (handle->flags & UV_HANDLE_CLOSING) { - uv_want_endgame(handle); + uv_want_endgame((uv_handle_t*)handle); } } @@ -1515,6 +1517,7 @@ static void uv_poll() { ULONG_PTR key; OVERLAPPED* overlapped; uv_req_t* req; + uv_timer_t* timer; uv_handle_t* handle; DWORD timeout; int64_t delta; @@ -1530,9 +1533,9 @@ static void uv_poll() { uv_update_time(); /* Check if there are any running timers */ - handle = RB_MIN(uv_timer_s, &uv_timers_); - if (handle) { - delta = handle->due - uv_now_; + timer = RB_MIN(uv_timer_tree_s, &uv_timers_); + if (timer) { + delta = timer->due - uv_now_; if (delta >= UINT_MAX) { /* Can't have a timeout greater than UINT_MAX, and a timeout value of */ /* UINT_MAX means infinite, so that's no good either. */ @@ -1560,26 +1563,26 @@ static void uv_poll() { uv_loop_invoke(uv_check_handles_); /* Call timer callbacks */ - for (handle = RB_MIN(uv_timer_s, &uv_timers_); - handle != NULL && handle->due <= uv_now_; - handle = RB_MIN(uv_timer_s, &uv_timers_)) { - RB_REMOVE(uv_timer_s, &uv_timers_, handle); + for (timer = RB_MIN(uv_timer_tree_s, &uv_timers_); + timer != NULL && timer->due <= uv_now_; + timer = RB_MIN(uv_timer_tree_s, &uv_timers_)) { + RB_REMOVE(uv_timer_tree_s, &uv_timers_, timer); - if (handle->repeat != 0) { + if (timer->repeat != 0) { /* If it is a repeating timer, reschedule with repeat timeout. */ - handle->due += handle->repeat; - if (handle->due < uv_now_) { - handle->due = uv_now_; + timer->due += timer->repeat; + if (timer->due < uv_now_) { + timer->due = uv_now_; } - if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) { + if (RB_INSERT(uv_timer_tree_s, &uv_timers_, timer) != NULL) { uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT"); } } else { /* If non-repeating, mark the timer as inactive. */ - handle->flags &= ~UV_HANDLE_ACTIVE; + timer->flags &= ~UV_HANDLE_ACTIVE; } - ((uv_loop_cb) handle->timer_cb)(handle, 0); + ((uv_loop_cb) timer->timer_cb)((uv_handle_t*)timer, 0); } /* Only if a iocp package was dequeued... 
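uv_poll does two timer chores above: derive the blocking wait's timeout from the earliest due time, and clamp a late repeating timer's next deadline to now so it does not fire in a burst to catch up. A sketch of both; the UINT_MAX - 1 clamp is an assumption about the elided branch (the hunk's comment only says UINT_MAX itself means infinite and is therefore unusable):

#include <limits.h>
#include <stdint.h>

static unsigned int poll_timeout(int64_t next_due, int64_t now) {
  int64_t delta = next_due - now;
  if (delta >= UINT_MAX)
    return UINT_MAX - 1;      /* assumed clamp; UINT_MAX = infinite */
  if (delta < 0)
    return 0;                 /* overdue: poll without blocking */
  return (unsigned int) delta;
}

static int64_t reschedule(int64_t due, int64_t repeat, int64_t now) {
  due += repeat;
  return due < now ? now : due;  /* late timer: no catch-up burst */
}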
*/ @@ -1589,11 +1592,11 @@ static void uv_poll() { switch (handle->type) { case UV_TCP: - uv_tcp_return_req(handle, req); + uv_tcp_return_req((uv_tcp_t*)handle, req); break; case UV_ASYNC: - uv_async_return_req(handle, req); + uv_async_return_req((uv_async_t*)handle, req); break; default: diff --git a/uv-win.h b/uv-win.h index 5c354ffa..f409a0d4 100644 --- a/uv-win.h +++ b/uv-win.h @@ -41,7 +41,7 @@ typedef struct uv_buf_t { char* base; } uv_buf_t; -#define uv_req_private_fields \ +#define UV_REQ_PRIVATE_FIELDS \ union { \ /* Used by I/O operations */ \ struct { \ @@ -63,7 +63,7 @@ typedef struct uv_buf_t { struct uv_req_s accept_req; \ char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; -#define uv_tcp_fields \ +#define UV_TCP_PRIVATE_FIELDS \ unsigned int reqs_pending; \ union { \ SOCKET socket; \ @@ -74,32 +74,36 @@ typedef struct uv_buf_t { struct { uv_tcp_server_fields }; \ }; -#define uv_timer_fields \ - RB_ENTRY(uv_handle_s) tree_entry; \ +#define UV_TIMER_PRIVATE_FIELDS \ + RB_ENTRY(uv_timer_s) tree_entry; \ int64_t due; \ int64_t repeat; \ void* timer_cb; -#define uv_loop_fields \ +#define UV_LOOP_PRIVATE_FIELDS \ uv_handle_t* loop_prev; \ uv_handle_t* loop_next; \ void* loop_cb; -#define uv_async_fields \ +#define UV_ASYNC_PRIVATE_FIELDS \ struct uv_req_s async_req; \ /* char to avoid alignment issues */ \ char volatile async_sent; -#define uv_handle_private_fields \ +#define UV_PREPARE_PRIVATE_FIELDS /* empty */ +#define UV_CHECK_PRIVATE_FIELDS /* empty */ +#define UV_IDLE_PRIVATE_FIELDS /* empty */ + +/* + * TODO: remove UV_LOOP_PRIVATE_FIELDS from UV_HANDLE_PRIVATE_FIELDS and + * use it in UV_(PREPARE|CHECK|IDLE)_PRIVATE_FIELDS instead. + */ + +#define UV_HANDLE_PRIVATE_FIELDS \ uv_handle_t* endgame_next; \ unsigned int flags; \ uv_err_t error; \ - union { \ - struct { uv_tcp_fields }; \ - struct { uv_timer_fields }; \ - struct { uv_loop_fields }; \ - struct { uv_async_fields }; \ - }; + UV_LOOP_PRIVATE_FIELDS int uv_utf16_to_utf8(wchar_t* utf16Buffer, size_t utf16Size, char* utf8Buffer, size_t utf8Size); diff --git a/uv.h b/uv.h index d29e659c..b52c29a8 100644 --- a/uv.h +++ b/uv.h @@ -33,6 +33,11 @@ extern "C" { typedef struct uv_err_s uv_err_t; typedef struct uv_handle_s uv_handle_t; +typedef struct uv_tcp_s uv_tcp_t; +typedef struct uv_timer_s uv_timer_t; +typedef struct uv_prepare_s uv_prepare_t; +typedef struct uv_check_s uv_check_t; +typedef struct uv_idle_s uv_idle_t; typedef struct uv_req_s uv_req_t; @@ -51,12 +56,12 @@ typedef struct uv_req_s uv_req_t; * In the case of uv_read_cb the uv_buf_t returned should be freed by the * user. */ -typedef uv_buf_t (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size); -typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf_t buf); +typedef uv_buf_t (*uv_alloc_cb)(uv_tcp_t* tcp, size_t suggested_size); +typedef void (*uv_read_cb)(uv_tcp_t* tcp, int nread, uv_buf_t buf); typedef void (*uv_write_cb)(uv_req_t* req, int status); typedef void (*uv_connect_cb)(uv_req_t* req, int status); typedef void (*uv_shutdown_cb)(uv_req_t* req, int status); -typedef void (*uv_accept_cb)(uv_handle_t* handle); +typedef void (*uv_accept_cb)(uv_tcp_t* server); typedef void (*uv_close_cb)(uv_handle_t* handle, int status); /* TODO: do loop_cb and async_cb really need a status argument? 
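The uv-win.h change drops the old layout, where every handle embedded a union of every type's private fields (so all handles were as big as the biggest), in favor of per-type structs. A reduced model of the size difference; the field sizes are made up:

#include <stdio.h>

/* Before: one fat handle, every instance pays for the largest member. */
typedef struct {
  int type;
  union {
    char tcp_fields[64];
    char timer_fields[24];
  } u;
} fat_handle_t;

/* After: each subclass carries only its own fields. */
typedef struct { int type; char tcp_fields[64]; } tcp_model_t;
typedef struct { int type; char timer_fields[24]; } timer_model_t;

int main(void) {
  printf("fat=%zu tcp=%zu timer=%zu\n",
         sizeof(fat_handle_t), sizeof(tcp_model_t), sizeof(timer_model_t));
  return 0;
}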
*/ typedef void (*uv_loop_cb)(uv_handle_t* handle, int status); @@ -142,70 +147,67 @@ struct uv_req_s { void* cb; void* data; /* private */ - uv_req_private_fields + UV_REQ_PRIVATE_FIELDS }; - -struct uv_handle_s { - /* read-only */ - uv_handle_type type; - /* public */ - uv_close_cb close_cb; - void* data; - /* number of bytes queued for writing */ - size_t write_queue_size; - /* private */ - uv_handle_private_fields -}; - - -/* Most functions return boolean: 0 for success and -1 for failure. - * On error the user should then call uv_last_error() to determine - * the error code. - */ -uv_err_t uv_last_error(); -char* uv_strerror(uv_err_t err); -const char* uv_err_name(uv_err_t err); - -void uv_init(uv_alloc_cb alloc); -int uv_run(); - -/* Manually modify the event loop's reference count. Useful if the user wants - * to have a handle or timeout that doesn't keep the loop alive. - */ -void uv_ref(); -void uv_unref(); - -void uv_update_time(); -int64_t uv_now(); - -void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb); - /* - * TODO: - * - uv_(pipe|pipe_tty)_handle_init - * - uv_bind_pipe(char* name) - * - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb) - * - A way to list cancelled uv_reqs after before/on uv_close_cb + * Initialize a request for use with uv_write, uv_shutdown, or uv_connect. */ +void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb); -/* TCP socket methods. - * Handle and callback bust be set by calling uv_req_init. + +#define UV_HANDLE_FIELDS \ + /* read-only */ \ + uv_handle_type type; \ + /* public */ \ + uv_close_cb close_cb; \ + void* data; \ + /* private */ \ + UV_HANDLE_PRIVATE_FIELDS \ + +/* The abstract base class of all handles. */ +struct uv_handle_s { + UV_HANDLE_FIELDS +}; + +/* + * Returns 1 if the prepare/check/idle handle has been started, 0 otherwise. + * For other handle types this always returns 1. */ -int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); -int uv_bind(uv_handle_t* handle, struct sockaddr* addr); +int uv_is_active(uv_handle_t* handle); + +/* + * Request handle to be closed. close_cb will be called asynchronously after + * this call. This MUST be called on each handle before memory is released. + */ +int uv_close(uv_handle_t* handle); + + +/* + * A subclass of uv_handle_t representing a TCP stream or TCP server. In the + * future this will probably be split into two classes - one a stream and + * the other a server. + */ +struct uv_tcp_s { + UV_HANDLE_FIELDS + size_t write_queue_size; /* number of bytes queued for writing */ + UV_TCP_PRIVATE_FIELDS +}; + +int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data); + +int uv_bind(uv_tcp_t* handle, struct sockaddr* addr); int uv_connect(uv_req_t* req, struct sockaddr* addr); + int uv_shutdown(uv_req_t* req); -/* TCP server methods. */ -int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb); +int uv_listen(uv_tcp_t* handle, int backlog, uv_accept_cb cb); /* Call this after accept_cb. client does not need to be initialized. */ -int uv_accept(uv_handle_t* server, uv_handle_t* client, +int uv_accept(uv_tcp_t* server, uv_tcp_t* client, uv_close_cb close_cb, void* data); - /* Read data from an incoming stream. The callback will be made several * several times until there is no more data to read or uv_read_stop is * called. 
When we've reached EOF nread will be set to -1 and the error is @@ -215,77 +217,138 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client, * eof; it happens when libuv requested a buffer through the alloc callback * but then decided that it didn't need that buffer. */ -int uv_read_start(uv_handle_t* handle, uv_read_cb cb); -int uv_read_stop(uv_handle_t* handle); +int uv_read_start(uv_tcp_t*, uv_read_cb cb); + +int uv_read_stop(uv_tcp_t*); int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt); -/* Timer methods */ -int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); -int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout, int64_t repeat); -int uv_timer_stop(uv_handle_t* handle); + +/* + * Subclass of uv_handle_t. libev wrapper. Every active prepare handle gets + * its callback called exactly once per loop iteration, just before the + * system blocks to wait for completed i/o. + */ +struct uv_prepare_s { + UV_HANDLE_FIELDS + UV_PREPARE_PRIVATE_FIELDS +}; + +int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data); + +int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb); + +int uv_prepare_stop(uv_prepare_t* prepare); + + +/* + * Subclass of uv_handle_t. libev wrapper. Every active check handle gets + * its callback called exactly once per loop iteration, just after the + * system returns from blocking. + */ +struct uv_check_s { + UV_HANDLE_FIELDS + UV_CHECK_PRIVATE_FIELDS +}; + +int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data); + +int uv_check_start(uv_check_t* check, uv_loop_cb cb); + +int uv_check_stop(uv_check_t* check); + + +/* + * Subclass of uv_handle_t. libev wrapper. Every active idle handle gets its + * callback called repeatedly until it is stopped. This happens after all + * other types of callbacks are processed. When there are multiple "idle" + * handles active, their callbacks are called in turn. + */ +struct uv_idle_s { + UV_HANDLE_FIELDS + UV_IDLE_PRIVATE_FIELDS +}; + +int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data); + +int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb); + +int uv_idle_stop(uv_idle_t* idle); + + +/* + * Subclass of uv_handle_t. libev wrapper. uv_async_send wakes up the event + * loop and calls the async handle's callback There is no guarantee that + * every uv_async_send call leads to exactly one invocation of the callback; + * The only guarantee is that the callback function is called at least once + * after the call to async_send. Unlike all other libuv functions, + * uv_async_send can be called from another thread. + */ +typedef struct { + UV_HANDLE_FIELDS + UV_ASYNC_PRIVATE_FIELDS +} uv_async_t; + +int uv_async_init(uv_async_t* async, uv_async_cb async_cb, + uv_close_cb close_cb, void* data); + +int uv_async_send(uv_async_t* async); + + +/* + * Subclass of uv_handle_t. Wraps libev's ev_timer watcher. Used to get + * woken up at a specified time in the future. + */ +struct uv_timer_s { + UV_HANDLE_FIELDS + UV_TIMER_PRIVATE_FIELDS +}; + +int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data); + +int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat); + +int uv_timer_stop(uv_timer_t* timer); + /* * Stop the timer, and if it is repeating restart it using the repeat value * as the timeout. If the timer has never been started before it returns -1 and * sets the error to UV_EINVAL. */ -int uv_timer_again(uv_handle_t* handle); +int uv_timer_again(uv_timer_t* timer); + /* * Set the repeat value. 
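Because the async contract above promises only "at least once" per uv_async_send, a callback has to drain shared state rather than count invocations. A sketch of the consuming side with a C11 atomic as the cross-thread counter; the names are illustrative, and in the real handle the volatile async_sent char in uv-win.h plays the latch role:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;  /* work items produced by other threads */

/* Any thread: publish work, then poke the loop. Several sends may
 * collapse into a single callback invocation. */
static void produce(void) {
  atomic_fetch_add(&pending, 1);
  /* uv_async_send(&async); */
}

/* Loop thread, inside the async callback: take everything. */
static void on_async(void) {
  int n = atomic_exchange(&pending, 0);
  printf("processing %d coalesced item(s)\n", n);
}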
Note that if the repeat value is set from a timer * callback it does not immediately take effect. If the timer was nonrepeating * before, it will have been stopped. If it was repeating, then the old repeat * value will have been used to schedule the next timeout. */ -void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat); -int64_t uv_timer_get_repeat(uv_handle_t* handle); +void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat); -/* libev wrapper. Every active prepare handle gets its callback called - * exactly once per loop iteration, just before the system blocks to wait - * for completed i/o. - */ -int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); -int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb); -int uv_prepare_stop(uv_handle_t* handle); +int64_t uv_timer_get_repeat(uv_timer_t* timer); -/* libev wrapper. Every active check handle gets its callback called exactly - * once per loop iteration, just after the system returns from blocking. - */ -int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); -int uv_check_start(uv_handle_t* handle, uv_loop_cb cb); -int uv_check_stop(uv_handle_t* handle); -/* libev wrapper. Every active idle handle gets its callback called repeatedly until it is - * stopped. This happens after all other types of callbacks are processed. - * When there are multiple "idle" handles active, their callbacks are called - * in turn. +/* + * Most functions return boolean: 0 for success and -1 for failure. + * On error the user should then call uv_last_error() to determine + * the error code. */ -int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); -int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb); -int uv_idle_stop(uv_handle_t* handle); +uv_err_t uv_last_error(); +char* uv_strerror(uv_err_t err); +const char* uv_err_name(uv_err_t err); -/* Returns 1 if the prepare/check/idle handle has been started, 0 otherwise. - * For other handle types this always returns 1. - */ -int uv_is_active(uv_handle_t* handle); +void uv_init(uv_alloc_cb alloc); +int uv_run(); -/* libev wrapper. uv_async_send wakes up the event loop and calls the async - * handle's callback There is no guarantee that every uv_async_send call - * leads to exactly one invocation of the callback; The only guarantee is - * that the callback function is called at least once after the call to - * async_send. Unlike everything else, uv_async_send can be called from - * another thread. - * - * QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my - * side. +/* + * Manually modify the event loop's reference count. Useful if the user wants + * to have a handle or timeout that doesn't keep the loop alive. */ -int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, - uv_close_cb close_cb, void* data); -int uv_async_send(uv_handle_t* handle); +void uv_ref(); +void uv_unref(); -/* Request handle to be closed. close_cb will be called - * asynchronously after this call. - */ -int uv_close(uv_handle_t* handle); +void uv_update_time(); +int64_t uv_now(); /* Utility */ @@ -294,6 +357,17 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port); /* Gets the executable path */ int uv_get_exepath(char* buffer, size_t* size); + +/* the presence of this union forces similar struct layout */ +union uv_any_handle { + uv_tcp_t tcp; + uv_prepare_t prepare; + uv_check_t check; + uv_idle_t idle; + uv_async_t async; + uv_timer_t timer; +}; + #ifdef __cplusplus } #endif
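Pulling the retyped declarations together, a minimal listen/accept skeleton as this header now intends the API to be used. The port, callback names, and absent error checking are illustrative; per the comment above, the client handle passed to uv_accept needs no prior initialization:

#include <netinet/in.h>
#include <stdlib.h>
#include "uv.h"

static uv_tcp_t server;

static uv_buf_t on_alloc(uv_tcp_t* tcp, size_t suggested_size) {
  uv_buf_t buf;
  buf.base = malloc(suggested_size);
  buf.len = suggested_size;
  return buf;
}

static void on_close(uv_handle_t* handle, int status) {
  free(handle);                    /* only ever a heap client here */
}

static void on_read(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
  if (nread < 0)
    uv_close((uv_handle_t*) tcp);  /* EOF or error: tear down */
  if (buf.base)
    free(buf.base);                /* reads hand buffer ownership to us */
}

static void on_accept(uv_tcp_t* s) {
  uv_tcp_t* client = malloc(sizeof(uv_tcp_t));
  uv_accept(s, client, on_close, NULL);
  uv_read_start(client, on_read);
}

int main(void) {
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 7000);
  uv_init(on_alloc);
  uv_tcp_init(&server, NULL, NULL);
  uv_bind(&server, (struct sockaddr*) &addr);
  uv_listen(&server, 128, on_accept);
  return uv_run();
}

malloc(sizeof(uv_tcp_t)) works here because the client is known to be a TCP handle; code that must hold "any handle" can allocate sizeof(union uv_any_handle) instead, which is exactly the layout guarantee the union at the end of the header exists to provide.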