Separate out uv_handle_t into different types

Fixes #4
Ryan Dahl 2011-06-01 18:19:47 -07:00
parent fb5262abdc
commit 11a4ad50c8
19 changed files with 805 additions and 679 deletions
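In broad strokes, the commit replaces the single catch-all uv_handle_t with one struct per handle kind (uv_tcp_t, uv_timer_t, uv_prepare_t, uv_check_t, uv_idle_t, uv_async_t), while generic operations such as uv_close() keep taking the base type. A minimal sketch of the resulting call pattern, assembled from the signatures in this diff; not a tested program, and the allocator is a bare-bones stub:

#include <stdlib.h>
#include "uv.h"

/* Per this diff, the allocator callback now takes a uv_tcp_t*. */
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
  uv_buf_t buf;
  buf.base = (char*)malloc(size);
  buf.len = size;
  return buf;
}

/* Close callbacks still receive the uv_handle_t base type. */
static void close_cb(uv_handle_t* handle, int status) {
  /* no-op for the sketch */
}

int main(void) {
  uv_tcp_t tcp;     /* was: uv_handle_t */
  uv_timer_t timer; /* was: uv_handle_t */

  uv_init(alloc_cb);

  /* Initializers are typed per handle kind... */
  uv_tcp_init(&tcp, close_cb, NULL);
  uv_timer_init(&timer, close_cb, NULL);

  /* ...while generic operations take the base class, so callers
   * upcast with an explicit C cast. */
  uv_close((uv_handle_t*)&tcp);
  uv_close((uv_handle_t*)&timer);

  return uv_run();
}

The explicit (uv_handle_t*) casts recur throughout the diff below; C has no implicit upcast, so every subclass pointer is converted by hand.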


@ -33,7 +33,7 @@
typedef struct {
int pongs;
int state;
uv_handle_t handle;
uv_tcp_t tcp;
uv_req_t connect_req;
uv_req_t shutdown_req;
} pinger_t;
@ -52,7 +52,7 @@ static int completed_pingers = 0;
static int64_t start_time;
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_t* ab;
ab = buf_freelist;
@ -107,7 +107,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req));
uv_req_init(req, &pinger->handle, pinger_write_cb);
uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_write_cb);
if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed");
@ -120,11 +120,11 @@ static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
}
static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
pinger = (pinger_t*)handle->data;
pinger = (pinger_t*)tcp->data;
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@ -143,7 +143,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->state == 0) {
pinger->pongs++;
if (uv_now() - start_time > TIME) {
uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
uv_req_init(&pinger->shutdown_req, (uv_handle_t*)tcp, pinger_shutdown_cb);
uv_shutdown(&pinger->shutdown_req);
break;
return;
@ -164,7 +164,7 @@ static void pinger_connect_cb(uv_req_t *req, int status) {
pinger_write_ping(pinger);
if (uv_read_start(req->handle, pinger_read_cb)) {
if (uv_read_start((uv_tcp_t*)(req->handle), pinger_read_cb)) {
FATAL("uv_read_start failed");
}
}
@ -181,14 +181,15 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
r = uv_tcp_init(&pinger->tcp, pinger_close_cb, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway, */
/* so these handles can be pre-initialized. */
uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
uv_req_init(&pinger->connect_req, (uv_handle_t*)&pinger->tcp,
pinger_connect_cb);
uv_bind(&pinger->handle, (struct sockaddr*)&client_addr);
uv_bind(&pinger->tcp, (struct sockaddr*)&client_addr);
r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
ASSERT(!r);
}


@ -35,17 +35,17 @@ static int TARGET_CONNECTIONS;
#define STATS_COUNT 5
static void do_write(uv_handle_t* handle);
static void do_write(uv_tcp_t*);
static void maybe_connect_some();
static uv_req_t* req_alloc();
static void req_free(uv_req_t* uv_req);
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size);
static uv_buf_t buf_alloc(uv_tcp_t*, size_t size);
static void buf_free(uv_buf_t uv_buf_t);
static uv_handle_t server;
static uv_tcp_t server;
static struct sockaddr_in listen_addr;
static struct sockaddr_in connect_addr;
@ -68,9 +68,9 @@ static char write_buffer[WRITE_BUFFER_SIZE];
/* Make this as large as you need. */
#define MAX_WRITE_HANDLES 1000
static uv_handle_t write_handles[MAX_WRITE_HANDLES];
static uv_tcp_t write_handles[MAX_WRITE_HANDLES];
static uv_handle_t timer_handle;
static uv_timer_t timer_handle;
static double gbit(int64_t bytes, int64_t passed_ms) {
@ -136,7 +136,7 @@ void read_sockets_close_cb(uv_handle_t* handle, int status) {
*/
if (uv_now() - start_time > 1000 && read_sockets == 0) {
read_show_stats();
uv_close(&server);
uv_close((uv_handle_t*)&server);
}
}
@ -157,7 +157,7 @@ static void start_stats_collection() {
}
static void read_cb(uv_handle_t* handle, int bytes, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
if (nrecv_total == 0) {
ASSERT(start_time == 0);
uv_update_time();
@ -165,7 +165,7 @@ static void read_cb(uv_handle_t* handle, int bytes, uv_buf_t buf) {
}
if (bytes < 0) {
uv_close(handle);
uv_close((uv_handle_t*)tcp);
return;
}
@ -186,11 +186,11 @@ static void write_cb(uv_req_t *req, int status) {
nsent += sizeof write_buffer;
nsent_total += sizeof write_buffer;
do_write(req->handle);
do_write((uv_tcp_t*)req->handle);
}
static void do_write(uv_handle_t* handle) {
static void do_write(uv_tcp_t* tcp) {
uv_req_t* req;
uv_buf_t buf;
int r;
@ -198,9 +198,9 @@ static void do_write(uv_handle_t* handle) {
buf.base = (char*) &write_buffer;
buf.len = sizeof write_buffer;
while (handle->write_queue_size == 0) {
while (tcp->write_queue_size == 0) {
req = req_alloc();
uv_req_init(req, handle, write_cb);
uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, &buf, 1);
ASSERT(r == 0);
@ -232,36 +232,36 @@ static void connect_cb(uv_req_t* req, int status) {
static void maybe_connect_some() {
uv_req_t* req;
uv_handle_t* handle;
uv_tcp_t* tcp;
int r;
while (max_connect_socket < TARGET_CONNECTIONS &&
max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
handle = &write_handles[max_connect_socket++];
tcp = &write_handles[max_connect_socket++];
r = uv_tcp_init(handle, write_sockets_close_cb, NULL);
r = uv_tcp_init(tcp, write_sockets_close_cb, NULL);
ASSERT(r == 0);
req = req_alloc();
uv_req_init(req, handle, connect_cb);
uv_req_init(req, (uv_handle_t*)tcp, connect_cb);
r = uv_connect(req, (struct sockaddr*) &connect_addr);
ASSERT(r == 0);
}
}
static void accept_cb(uv_handle_t* s) {
uv_handle_t* handle;
static void accept_cb(uv_tcp_t* s) {
uv_tcp_t* tcp;
int r;
ASSERT(&server == s);
handle = malloc(sizeof(uv_handle_t));
tcp = malloc(sizeof(uv_tcp_t));
r = uv_accept(s, handle, read_sockets_close_cb, NULL);
r = uv_accept(s, tcp, read_sockets_close_cb, NULL);
ASSERT(r == 0);
r = uv_read_start(handle, read_cb);
r = uv_read_start(tcp, read_cb);
ASSERT(r == 0);
read_sockets++;
@ -317,7 +317,7 @@ typedef struct buf_list_s {
static buf_list_t* buf_freelist = NULL;
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) {
static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_list_t* buf;
buf = buf_freelist;


@ -24,7 +24,12 @@
BENCHMARK_IMPL(sizes) {
LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
LOGF("uv_tcp_t: %lu bytes\n", sizeof(uv_tcp_t));
LOGF("uv_prepare_t: %lu bytes\n", sizeof(uv_prepare_t));
LOGF("uv_check_t: %lu bytes\n", sizeof(uv_check_t));
LOGF("uv_idle_t: %lu bytes\n", sizeof(uv_idle_t));
LOGF("uv_async_t: %lu bytes\n", sizeof(uv_async_t));
LOGF("uv_timer_t: %lu bytes\n", sizeof(uv_timer_t));
return 0;
}


@ -32,13 +32,13 @@ typedef struct {
static int server_closed;
static uv_handle_t server;
static uv_tcp_t server;
static void after_write(uv_req_t* req, int status);
static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf);
static void after_read(uv_tcp_t*, int nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer, int status);
static void on_accept(uv_handle_t* handle);
static void on_accept(uv_tcp_t*);
static void after_write(uv_req_t* req, int status) {
@ -64,7 +64,7 @@ static void after_shutdown(uv_req_t* req, int status) {
}
static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
int i;
write_req_t *wr;
uv_req_t* req;
@ -78,7 +78,7 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
}
req = (uv_req_t*) malloc(sizeof *req);
uv_req_init(req, handle, after_shutdown);
uv_req_init(req, (uv_handle_t*)handle, after_shutdown);
uv_shutdown(req);
return;
@ -94,7 +94,7 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (!server_closed) {
for (i = 0; i < nread; i++) {
if (buf.base[i] == 'Q') {
uv_close(&server);
uv_close((uv_handle_t*)&server);
server_closed = 1;
}
}
@ -102,7 +102,7 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
wr = (write_req_t*) malloc(sizeof *wr);
uv_req_init(&wr->req, handle, after_write);
uv_req_init(&wr->req, (uv_handle_t*)handle, after_write);
wr->buf.base = buf.base;
wr->buf.len = nread;
if (uv_write(&wr->req, &wr->buf, 1)) {
@ -118,8 +118,8 @@ static void on_close(uv_handle_t* peer, int status) {
}
static void on_accept(uv_handle_t* server) {
uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle);
static void on_accept(uv_tcp_t* server) {
uv_tcp_t* handle = (uv_tcp_t*) malloc(sizeof *handle);
if (uv_accept(server, handle, on_close, NULL)) {
FATAL("uv_accept failed");
@ -130,7 +130,7 @@ static void on_accept(uv_handle_t* server) {
static void on_server_close(uv_handle_t* handle, int status) {
ASSERT(handle == &server);
ASSERT(handle == (uv_handle_t*)&server);
ASSERT(status == 0);
}
@ -164,7 +164,7 @@ static int echo_start(int port) {
}
static uv_buf_t echo_alloc(uv_handle_t* handle, size_t suggested_size) {
static uv_buf_t echo_alloc(uv_tcp_t* handle, size_t suggested_size) {
uv_buf_t buf;
buf.base = (char*) malloc(suggested_size);
buf.len = suggested_size;


@ -25,9 +25,9 @@
#include <stdlib.h>
static uv_handle_t prepare_handle;
static uv_prepare_t prepare_handle;
static uv_handle_t async1_handle;
static uv_async_t async1_handle;
/* static uv_handle_t async2_handle; */
static int prepare_cb_called = 0;
@ -120,7 +120,7 @@ static void close_cb(uv_handle_t* handle, int status) {
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
@ -128,7 +128,7 @@ static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static void async1_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &async1_handle);
ASSERT(handle == (uv_handle_t*)&async1_handle);
ASSERT(status == 0);
async1_cb_called++;
@ -159,7 +159,7 @@ static void async2_cb(uv_handle_t* handle, int status) {
static void prepare_cb(uv_handle_t* handle, int status) {
int r;
ASSERT(handle == &prepare_handle);
ASSERT(handle == (uv_handle_t*)&prepare_handle);
ASSERT(status == 0);
switch (prepare_cb_called) {


@ -36,7 +36,7 @@ static void close_cb(uv_handle_t* handle, int status) {
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
@ -45,7 +45,7 @@ static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
TEST_IMPL(bind_error_addrinuse) {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
uv_handle_t server1, server2;
uv_tcp_t server1, server2;
int r;
uv_init(alloc_cb);
@ -67,8 +67,8 @@ TEST_IMPL(bind_error_addrinuse) {
ASSERT(uv_last_error().code == UV_EADDRINUSE);
uv_close(&server1);
uv_close(&server2);
uv_close((uv_handle_t*)&server1);
uv_close((uv_handle_t*)&server2);
uv_run();
@ -80,7 +80,7 @@ TEST_IMPL(bind_error_addrinuse) {
TEST_IMPL(bind_error_addrnotavail_1) {
struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
uv_handle_t server;
uv_tcp_t server;
int r;
uv_init(alloc_cb);
@ -94,7 +94,7 @@ TEST_IMPL(bind_error_addrnotavail_1) {
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
}
uv_close(&server);
uv_close((uv_handle_t*)&server);
uv_run();
@ -106,7 +106,7 @@ TEST_IMPL(bind_error_addrnotavail_1) {
TEST_IMPL(bind_error_addrnotavail_2) {
struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
uv_handle_t server;
uv_tcp_t server;
int r;
uv_init(alloc_cb);
@ -117,7 +117,7 @@ TEST_IMPL(bind_error_addrnotavail_2) {
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
uv_close(&server);
uv_close((uv_handle_t*)&server);
uv_run();
@ -129,7 +129,7 @@ TEST_IMPL(bind_error_addrnotavail_2) {
TEST_IMPL(bind_error_fault) {
char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
uv_handle_t server;
uv_tcp_t server;
int r;
uv_init(alloc_cb);
@ -141,7 +141,7 @@ TEST_IMPL(bind_error_fault) {
ASSERT(uv_last_error().code == UV_EFAULT);
uv_close(&server);
uv_close((uv_handle_t*)&server);
uv_run();
@ -155,7 +155,7 @@ TEST_IMPL(bind_error_fault) {
TEST_IMPL(bind_error_inval) {
struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
uv_handle_t server;
uv_tcp_t server;
int r;
uv_init(alloc_cb);
@ -169,7 +169,7 @@ TEST_IMPL(bind_error_inval) {
ASSERT(uv_last_error().code == UV_EINVAL);
uv_close(&server);
uv_close((uv_handle_t*)&server);
uv_run();


@ -30,7 +30,8 @@
static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";
static uv_handle_t client, timer;
static uv_tcp_t client;
static uv_timer_t timer;
static uv_req_t connect_req, write_req, shutdown_req;
static int nested = 0;
@ -58,7 +59,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
printf("Read. nread == %d\n", nread);
@ -72,7 +73,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
ASSERT(uv_last_error().code == UV_EOF);
nested++;
if (uv_close(handle)) {
if (uv_close((uv_handle_t*)tcp)) {
FATAL("uv_close failed");
}
nested--;
@ -88,7 +89,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
/* from a fresh stack. */
if (bytes_received == sizeof MESSAGE) {
nested++;
uv_req_init(&shutdown_req, handle, shutdown_cb);
uv_req_init(&shutdown_req, (uv_handle_t*)tcp, shutdown_cb);
puts("Shutdown");
@ -103,7 +104,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void timer_cb(uv_handle_t* handle, int status) {
int r;
ASSERT(handle == &timer);
ASSERT(handle == (uv_handle_t*)&timer);
ASSERT(status == 0);
ASSERT(nested == 0 && "timer_cb must be called from a fresh stack");
@ -170,7 +171,7 @@ static void connect_cb(uv_req_t* req, int status) {
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.len = size;
buf.base = (char*) malloc(size);
@ -191,7 +192,7 @@ TEST_IMPL(callback_stack) {
puts("Connecting...");
nested++;
uv_req_init(&connect_req, &client, connect_cb);
uv_req_init(&connect_req, (uv_handle_t*)&client, connect_cb);
if (uv_connect(&connect_req, (struct sockaddr*) &addr)) {
FATAL("uv_connect failed");
}


@ -26,7 +26,7 @@
#include <stdio.h>
static uv_handle_t handle;
static uv_tcp_t tcp;
static uv_req_t req;
static int connect_cb_calls;
static int close_cb_calls;
@ -46,7 +46,7 @@ static void on_connect(uv_req_t *req, int status) {
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
@ -65,14 +65,14 @@ TEST_IMPL(connection_fail) {
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&handle, on_close, NULL);
r = uv_tcp_init(&tcp, on_close, NULL);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway, */
/* so these handles can be pre-initialized. */
uv_req_init(&req, &handle, on_connect);
uv_req_init(&req, (uv_handle_t*)&tcp, on_connect);
uv_bind(&handle, (struct sockaddr*)&client_addr);
uv_bind(&tcp, (struct sockaddr*)&client_addr);
r = uv_connect(&req, (struct sockaddr*)&server_addr);
ASSERT(!r);


@ -44,27 +44,27 @@ static void close_cb(uv_handle_t* handle, int status) {
static void do_accept(uv_handle_t* timer_handle, int status) {
uv_handle_t* server;
uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle);
uv_tcp_t* server;
uv_tcp_t* accepted_handle = (uv_tcp_t*)malloc(sizeof *accepted_handle);
int r;
ASSERT(timer_handle != NULL);
ASSERT(status == 0);
ASSERT(accepted_handle != NULL);
server = (uv_handle_t*)timer_handle->data;
server = (uv_tcp_t*)timer_handle->data;
r = uv_accept(server, accepted_handle, close_cb, NULL);
ASSERT(r == 0);
do_accept_called++;
/* Immediately close the accepted handle. */
r = uv_close(accepted_handle);
r = uv_close((uv_handle_t*)accepted_handle);
ASSERT(r == 0);
/* After accepting the two clients close the server handle */
if (do_accept_called == 2) {
r = uv_close(server);
r = uv_close((uv_handle_t*)server);
ASSERT(r == 0);
}
@ -74,15 +74,15 @@ static void do_accept(uv_handle_t* timer_handle, int status) {
}
static void accept_cb(uv_handle_t* handle) {
static void accept_cb(uv_tcp_t* tcp) {
int r;
uv_handle_t* timer_handle;
uv_timer_t* timer_handle;
timer_handle = (uv_handle_t*)malloc(sizeof *timer_handle);
timer_handle = (uv_timer_t*)malloc(sizeof *timer_handle);
ASSERT(timer_handle != NULL);
/* Accept the client after 1 second */
r = uv_timer_init(timer_handle, close_cb, (void*)handle);
r = uv_timer_init(timer_handle, close_cb, (void*)tcp);
ASSERT(r == 0);
r = uv_timer_start(timer_handle, do_accept, 1000, 0);
ASSERT(r == 0);
@ -93,7 +93,7 @@ static void accept_cb(uv_handle_t* handle) {
static void start_server() {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);
uv_tcp_t* server = (uv_tcp_t*)malloc(sizeof *server);
int r;
ASSERT(server != NULL);
@ -109,9 +109,9 @@ static void start_server() {
}
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
/* The server will not send anything, it should close gracefully. */
ASSERT(handle != NULL);
ASSERT(tcp != NULL);
ASSERT(nread == -1);
ASSERT(uv_last_error().code == UV_EOF);
@ -119,7 +119,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close(handle);
uv_close((uv_handle_t*)tcp);
}
@ -131,7 +131,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Not that the server will send anything, but otherwise we'll never know */
/* when the server closes the connection. */
r = uv_read_start(req->handle, read_cb);
r = uv_read_start((uv_tcp_t*)(req->handle), read_cb);
ASSERT(r == 0);
connect_cb_called++;
@ -142,7 +142,7 @@ static void connect_cb(uv_req_t* req, int status) {
static void client_connect() {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
@ -152,13 +152,13 @@ static void client_connect() {
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
uv_req_init(connect_req, client, connect_cb);
uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;


@ -75,15 +75,15 @@
#define TIMEOUT 100
static uv_handle_t prepare_1_handle;
static uv_handle_t prepare_2_handle;
static uv_prepare_t prepare_1_handle;
static uv_prepare_t prepare_2_handle;
static uv_handle_t check_handle;
static uv_check_t check_handle;
static uv_handle_t idle_1_handles[IDLE_COUNT];
static uv_handle_t idle_2_handle;
static uv_idle_t idle_1_handles[IDLE_COUNT];
static uv_idle_t idle_2_handle;
static uv_handle_t timer_handle;
static uv_timer_t timer_handle;
static int loop_iteration = 0;
@ -110,7 +110,7 @@ static int timer_cb_called = 0;
static void timer_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &timer_handle);
ASSERT(handle == (uv_handle_t*)&timer_handle);
ASSERT(status == 0);
timer_cb_called++;
@ -127,7 +127,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
LOG("IDLE_2_CB\n");
ASSERT(handle == &idle_2_handle);
ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0);
idle_2_cb_called++;
@ -140,7 +140,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n");
ASSERT(handle == &idle_2_handle);
ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0);
ASSERT(idle_2_is_active);
@ -173,7 +173,7 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
idle_1_cb_called++;
if (idle_1_cb_called % 5 == 0) {
r = uv_idle_stop(handle);
r = uv_idle_stop((uv_idle_t*)handle);
ASSERT(r == 0);
idles_1_active--;
}
@ -195,7 +195,7 @@ static void check_cb(uv_handle_t* handle, int status) {
LOG("CHECK_CB\n");
ASSERT(handle == &check_handle);
ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0);
/* XXX
@ -213,22 +213,22 @@ static void check_cb(uv_handle_t* handle, int status) {
} else {
/* End of the test - close all handles */
r = uv_close(&prepare_1_handle);
r = uv_close((uv_handle_t*)&prepare_1_handle);
ASSERT(r == 0);
r = uv_close(&check_handle);
r = uv_close((uv_handle_t*)&check_handle);
ASSERT(r == 0);
r = uv_close(&prepare_2_handle);
r = uv_close((uv_handle_t*)&prepare_2_handle);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
r = uv_close(&idle_1_handles[i]);
r = uv_close((uv_handle_t*)&idle_1_handles[i]);
ASSERT(r == 0);
}
/* This handle is closed/recreated every time, close it only if it is */
/* active. */
if (idle_2_is_active) {
r = uv_close(&idle_2_handle);
r = uv_close((uv_handle_t*)&idle_2_handle);
ASSERT(r == 0);
}
}
@ -239,7 +239,7 @@ static void check_cb(uv_handle_t* handle, int status) {
static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == &check_handle);
ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0);
check_close_cb_called++;
@ -251,7 +251,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CB\n");
ASSERT(handle == &prepare_2_handle);
ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0);
/* XXX ASSERT(idles_1_active == 0); */
@ -263,7 +263,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
/* (loop_iteration % 2 == 0) cannot be true. */
ASSERT(loop_iteration % 2 != 0);
r = uv_prepare_stop(handle);
r = uv_prepare_stop((uv_prepare_t*)handle);
ASSERT(r == 0);
prepare_2_cb_called++;
@ -272,7 +272,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
static void prepare_2_close_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == &prepare_2_handle);
ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0);
prepare_2_close_cb_called++;
@ -284,7 +284,7 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_1_CB\n");
ASSERT(handle == &prepare_1_handle);
ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0);
/* XXX
@ -306,14 +306,14 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == &prepare_1_handle);
ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0);
prepare_1_close_cb_called++;
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t rv = { 0, 0 };
FATAL("alloc_cb should never be called in this test");
return rv;
@ -394,43 +394,43 @@ TEST_IMPL(ref) {
TEST_IMPL(idle_ref) {
uv_handle_t h;
uv_idle_t h;
uv_init(alloc_cb);
uv_idle_init(&h, NULL, NULL);
uv_idle_start(&h, NULL);
uv_unref(&h);
uv_unref();
uv_run();
return 0;
}
TEST_IMPL(async_ref) {
uv_handle_t h;
uv_async_t h;
uv_init(alloc_cb);
uv_async_init(&h, NULL, NULL, NULL);
uv_unref(&h);
uv_unref();
uv_run();
return 0;
}
TEST_IMPL(prepare_ref) {
uv_handle_t h;
uv_prepare_t h;
uv_init(alloc_cb);
uv_prepare_init(&h, NULL, NULL);
uv_prepare_start(&h, NULL);
uv_unref(&h);
uv_unref();
uv_run();
return 0;
}
TEST_IMPL(check_ref) {
uv_handle_t h;
uv_check_t h;
uv_init(alloc_cb);
uv_check_init(&h, NULL, NULL);
uv_check_start(&h, NULL);
uv_unref(&h);
uv_unref();
uv_run();
return 0;
}


@ -39,7 +39,7 @@ static char PING[] = "PING\n";
typedef struct {
int pongs;
int state;
uv_handle_t handle;
uv_tcp_t tcp;
uv_req_t connect_req;
uv_req_t read_req;
char read_buffer[BUFSIZE];
@ -75,7 +75,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req));
uv_req_init(req, &pinger->handle, pinger_after_write);
uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_after_write);
if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed");
@ -85,11 +85,11 @@ static void pinger_write_ping(pinger_t* pinger) {
}
static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
pinger = (pinger_t*)handle->data;
pinger = (pinger_t*)tcp->data;
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@ -100,7 +100,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close(&pinger->handle);
uv_close((uv_handle_t*)(&pinger->tcp));
return;
}
@ -115,7 +115,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
uv_close(&pinger->handle);
uv_close((uv_handle_t*)(&pinger->tcp));
return;
}
}
@ -130,7 +130,7 @@ static void pinger_on_connect(uv_req_t *req, int status) {
pinger_write_ping(pinger);
uv_read_start(req->handle, pinger_read_cb);
uv_read_start((uv_tcp_t*)(req->handle), pinger_read_cb);
}
@ -144,19 +144,20 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
r = uv_tcp_init(&pinger->tcp, pinger_on_close, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway, */
/* so these handles can be pre-initialized. */
uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
uv_req_init(&pinger->connect_req, (uv_handle_t*)(&pinger->tcp),
pinger_on_connect);
r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
ASSERT(!r);
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;


@ -59,8 +59,10 @@ static void shutdown_cb(uv_req_t* req, int status) {
ASSERT(req);
ASSERT(status == 0);
uv_tcp_t* tcp = (uv_tcp_t*)(req->handle);
/* The write buffer should be empty by now. */
ASSERT(req->handle->write_queue_size == 0);
ASSERT(tcp->write_queue_size == 0);
/* Now we wait for the EOF */
shutdown_cb_called++;
@ -72,8 +74,8 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
ASSERT(handle != NULL);
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(tcp != NULL);
if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF);
@ -83,7 +85,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close(handle);
uv_close((uv_handle_t*)tcp);
return;
}
@ -111,13 +113,13 @@ static void write_cb(uv_req_t* req, int status) {
static void connect_cb(uv_req_t* req, int status) {
uv_buf_t send_bufs[CHUNKS_PER_WRITE];
uv_handle_t* handle;
uv_tcp_t* tcp;
int i, j, r;
ASSERT(req != NULL);
ASSERT(status == 0);
handle = req->handle;
tcp = (uv_tcp_t*)req->handle;
connect_cb_called++;
free(req);
@ -133,7 +135,7 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
uv_req_init(req, handle, write_cb);
uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, (uv_buf_t*)&send_bufs, CHUNKS_PER_WRITE);
ASSERT(r == 0);
}
@ -141,7 +143,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Shutdown on drain. FIXME: dealloc req? */
req = (uv_req_t*) malloc(sizeof(uv_req_t));
ASSERT(req != NULL);
uv_req_init(req, handle, shutdown_cb);
uv_req_init(req, (uv_handle_t*)tcp, shutdown_cb);
r = uv_shutdown(req);
ASSERT(r == 0);
@ -149,13 +151,13 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
uv_req_init(req, handle, read_cb);
r = uv_read_start(handle, read_cb);
uv_req_init(req, (uv_handle_t*)tcp, read_cb);
r = uv_read_start(tcp, read_cb);
ASSERT(r == 0);
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
@ -165,7 +167,7 @@ static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
TEST_IMPL(tcp_writealot) {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
@ -181,7 +183,7 @@ TEST_IMPL(tcp_writealot) {
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
uv_req_init(connect_req, client, connect_cb);
uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);


@ -29,7 +29,7 @@ static int repeat_2_cb_called = 0;
static int repeat_2_cb_allowed = 0;
static uv_handle_t dummy, repeat_1, repeat_2;
static uv_timer_t dummy, repeat_1, repeat_2;
static int64_t start_time;
@ -45,10 +45,10 @@ static void close_cb(uv_handle_t* handle, int status) {
static void repeat_1_cb(uv_handle_t* handle, int status) {
int r;
ASSERT(handle == &repeat_1);
ASSERT(handle == (uv_handle_t*)&repeat_1);
ASSERT(status == 0);
ASSERT(uv_timer_get_repeat(handle) == 50);
ASSERT(uv_timer_get_repeat((uv_timer_t*)handle) == 50);
LOGF("repeat_1_cb called after %ld ms\n", (long int)(uv_now() - start_time));
@ -68,7 +68,7 @@ static void repeat_1_cb(uv_handle_t* handle, int status) {
static void repeat_2_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &repeat_2);
ASSERT(handle == (uv_handle_t*) &repeat_2);
ASSERT(status == 0);
ASSERT(repeat_2_cb_allowed);
@ -76,21 +76,22 @@ static void repeat_2_cb(uv_handle_t* handle, int status) {
repeat_2_cb_called++;
if (uv_timer_get_repeat(handle) == 0) {
if (uv_timer_get_repeat(&repeat_2) == 0) {
ASSERT(!uv_is_active(handle));
uv_close(handle);
return;
}
LOGF("uv_timer_get_repeat %ld ms\n", (long int)uv_timer_get_repeat(handle));
ASSERT(uv_timer_get_repeat(handle) == 100);
LOGF("uv_timer_get_repeat %ld ms\n",
(long int)uv_timer_get_repeat(&repeat_2));
ASSERT(uv_timer_get_repeat(&repeat_2) == 100);
/* This shouldn't take effect immediately. */
uv_timer_set_repeat(&repeat_2, 0);
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;


@ -92,7 +92,7 @@ static void never_cb(uv_handle_t* handle, int status) {
}
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
@ -100,8 +100,8 @@ static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
TEST_IMPL(timer) {
uv_handle_t *once;
uv_handle_t repeat, never;
uv_timer_t *once;
uv_timer_t repeat, never;
int i, r;
uv_init(alloc_cb);
@ -111,7 +111,7 @@ TEST_IMPL(timer) {
/* Let 10 timers time out in 500 ms total. */
for (i = 0; i < 10; i++) {
once = (uv_handle_t*)malloc(sizeof(*once));
once = (uv_timer_t*)malloc(sizeof(*once));
ASSERT(once != NULL);
r = uv_timer_init(once, once_close_cb, NULL);
ASSERT(r == 0);

uv-unix.c (604 changed lines)

File diff suppressed because it is too large.


@ -38,7 +38,7 @@ typedef struct {
} uv_buf_t;
#define uv_req_private_fields \
#define UV_REQ_PRIVATE_FIELDS \
int write_index; \
ev_timer timer; \
ngx_queue_t queue; \
@ -47,11 +47,14 @@ typedef struct {
/* TODO: union or classes please! */
#define uv_handle_private_fields \
#define UV_HANDLE_PRIVATE_FIELDS \
int fd; \
int flags; \
ev_idle next_watcher; \
/* UV_TCP */ \
ev_idle next_watcher;
/* UV_TCP */
#define UV_TCP_PRIVATE_FIELDS \
int delayed_error; \
uv_read_cb read_cb; \
uv_accept_cb accept_cb; \
@ -60,20 +63,35 @@ typedef struct {
uv_req_t *shutdown_req; \
ev_io read_watcher; \
ev_io write_watcher; \
ngx_queue_t write_queue; \
ngx_queue_t write_queue;
/* UV_PREPARE */ \
#define UV_PREPARE_PRIVATE_FIELDS \
ev_prepare prepare_watcher; \
uv_loop_cb prepare_cb; \
/* UV_CHECK */ \
uv_loop_cb prepare_cb;
/* UV_CHECK */
#define UV_CHECK_PRIVATE_FIELDS \
ev_check check_watcher; \
uv_loop_cb check_cb; \
/* UV_IDLE */ \
uv_loop_cb check_cb;
/* UV_IDLE */
#define UV_IDLE_PRIVATE_FIELDS \
ev_idle idle_watcher; \
uv_loop_cb idle_cb; \
/* UV_ASYNC */ \
uv_loop_cb idle_cb;
/* UV_ASYNC */
#define UV_ASYNC_PRIVATE_FIELDS \
ev_async async_watcher; \
uv_loop_cb async_cb; \
/* UV_TIMER */ \
uv_loop_cb async_cb;
/* UV_TIMER */
#define UV_TIMER_PRIVATE_FIELDS \
ev_timer timer_watcher; \
uv_loop_cb timer_cb;

uv-win.c (205 changed lines)

@ -141,12 +141,12 @@ static LPFN_TRANSMITFILE pTransmitFile;
/* Binary tree used to keep the list of timers sorted. */
static int uv_timer_compare(uv_handle_t* handle1, uv_handle_t* handle2);
RB_HEAD(uv_timer_s, uv_handle_s);
RB_PROTOTYPE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare);
static int uv_timer_compare(uv_timer_t* handle1, uv_timer_t* handle2);
RB_HEAD(uv_timer_tree_s, uv_timer_s);
RB_PROTOTYPE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
/* The head of the timers tree */
static struct uv_timer_s uv_timers_ = RB_INITIALIZER(uv_timers_);
static struct uv_timer_tree_s uv_timers_ = RB_INITIALIZER(uv_timers_);
/* Lists of active uv_prepare / uv_check / uv_idle watchers */
@ -422,7 +422,7 @@ static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
}
static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb,
static int uv_tcp_init_socket(uv_tcp_t* handle, uv_close_cb close_cb,
void* data, SOCKET socket) {
DWORD yes = 1;
@ -464,15 +464,14 @@ static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb,
}
static void uv_tcp_init_connection(uv_handle_t* handle) {
static void uv_tcp_init_connection(uv_tcp_t* handle) {
handle->flags |= UV_HANDLE_CONNECTION;
handle->write_reqs_pending = 0;
uv_req_init(&(handle->read_req), handle, NULL);
uv_req_init(&(handle->read_req), (uv_handle_t*)handle, NULL);
}
int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
void* data) {
int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data) {
SOCKET sock;
sock = socket(AF_INET, SOCK_STREAM, 0);
@ -490,7 +489,7 @@ int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
}
static void uv_tcp_endgame(uv_handle_t* handle) {
static void uv_tcp_endgame(uv_tcp_t* handle) {
uv_err_t err;
int status;
@ -520,7 +519,7 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
!(handle->flags & UV_HANDLE_CLOSING)) {
/* Because uv_close will add the handle to the endgame_handles list, */
/* return here and call the close cb the next time. */
uv_close(handle);
uv_close((uv_handle_t*)handle);
return;
}
@ -531,7 +530,7 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
if (handle->close_cb) {
uv_last_error_ = handle->error;
handle->close_cb(handle, handle->error.code == UV_OK ? 0 : 1);
handle->close_cb((uv_handle_t*)handle, handle->error.code == UV_OK ? 0 : 1);
}
uv_refs_--;
@ -539,13 +538,13 @@ static void uv_tcp_endgame(uv_handle_t* handle) {
}
static void uv_timer_endgame(uv_handle_t* handle) {
static void uv_timer_endgame(uv_timer_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb(handle, 0);
handle->close_cb((uv_handle_t*)handle, 0);
}
uv_refs_--;
@ -567,14 +566,14 @@ static void uv_loop_endgame(uv_handle_t* handle) {
}
static void uv_async_endgame(uv_handle_t* handle) {
static void uv_async_endgame(uv_async_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb(handle, 0);
handle->close_cb((uv_handle_t*)handle, 0);
}
uv_refs_--;
@ -593,11 +592,11 @@ static void uv_call_endgames() {
switch (handle->type) {
case UV_TCP:
uv_tcp_endgame(handle);
uv_tcp_endgame((uv_tcp_t*)handle);
break;
case UV_TIMER:
uv_timer_endgame(handle);
uv_timer_endgame((uv_timer_t*)handle);
break;
case UV_PREPARE:
@ -607,7 +606,7 @@ static void uv_call_endgames() {
break;
case UV_ASYNC:
uv_async_endgame(handle);
uv_async_endgame((uv_async_t*)handle);
break;
default:
@ -629,6 +628,8 @@ static void uv_want_endgame(uv_handle_t* handle) {
static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
uv_tcp_t* tcp;
if (handle->flags & UV_HANDLE_CLOSING) {
return 0;
}
@ -639,34 +640,35 @@ static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
closesocket(handle->socket);
if (handle->reqs_pending == 0) {
tcp = (uv_tcp_t*)handle;
closesocket(tcp->socket);
if (tcp->reqs_pending == 0) {
uv_want_endgame(handle);
}
return 0;
case UV_TIMER:
uv_timer_stop(handle);
uv_timer_stop((uv_timer_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_PREPARE:
uv_prepare_stop(handle);
uv_prepare_stop((uv_prepare_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_CHECK:
uv_check_stop(handle);
uv_check_stop((uv_check_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_IDLE:
uv_idle_stop(handle);
uv_idle_stop((uv_idle_t*)handle);
uv_want_endgame(handle);
return 0;
case UV_ASYNC:
if (!handle->async_sent) {
if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(handle);
}
return 0;
@ -695,7 +697,7 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) {
}
int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
int uv_bind(uv_tcp_t* handle, struct sockaddr* addr) {
int addrsize;
DWORD err;
@ -726,7 +728,7 @@ int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
}
static void uv_queue_accept(uv_handle_t* handle) {
static void uv_queue_accept(uv_tcp_t* handle) {
uv_req_t* req;
BOOL success;
DWORD bytes;
@ -737,7 +739,7 @@ static void uv_queue_accept(uv_handle_t* handle) {
accept_socket = socket(AF_INET, SOCK_STREAM, 0);
if (accept_socket == INVALID_SOCKET) {
uv_close_error(handle, uv_new_sys_error(WSAGetLastError()));
uv_close_error((uv_handle_t*)handle, uv_new_sys_error(WSAGetLastError()));
return;
}
@ -762,7 +764,7 @@ static void uv_queue_accept(uv_handle_t* handle) {
/* destroy the preallocated client handle */
closesocket(accept_socket);
/* destroy ourselves */
uv_close_error(handle, uv_last_error_);
uv_close_error((uv_handle_t*)handle, uv_last_error_);
return;
}
@ -773,7 +775,7 @@ static void uv_queue_accept(uv_handle_t* handle) {
}
static void uv_queue_read(uv_handle_t* handle) {
static void uv_queue_read(uv_tcp_t* handle) {
uv_req_t *req;
uv_buf_t buf;
int result;
@ -799,7 +801,7 @@ static void uv_queue_read(uv_handle_t* handle) {
NULL);
if (result != 0 && WSAGetLastError() != ERROR_IO_PENDING) {
uv_set_sys_error(WSAGetLastError());
uv_close_error(handle, uv_last_error_);
uv_close_error((uv_handle_t*)handle, uv_last_error_);
return;
}
@ -808,7 +810,7 @@ static void uv_queue_read(uv_handle_t* handle) {
}
int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
int uv_listen(uv_tcp_t* handle, int backlog, uv_accept_cb cb) {
assert(backlog > 0);
if (handle->flags & UV_HANDLE_BIND_ERROR) {
@ -831,14 +833,14 @@ int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
handle->flags |= UV_HANDLE_LISTENING;
handle->accept_cb = cb;
uv_req_init(&(handle->accept_req), handle, NULL);
uv_req_init(&(handle->accept_req), (uv_handle_t*)handle, NULL);
uv_queue_accept(handle);
return 0;
}
int uv_accept(uv_handle_t* server, uv_handle_t* client,
int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data) {
int rv = 0;
@ -865,7 +867,7 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
}
int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
int uv_read_start(uv_tcp_t* handle, uv_read_cb cb) {
if (!(handle->flags & UV_HANDLE_CONNECTION)) {
uv_set_sys_error(WSAEINVAL);
return -1;
@ -893,7 +895,7 @@ int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
}
int uv_read_stop(uv_handle_t* handle) {
int uv_read_stop(uv_tcp_t* handle) {
handle->flags &= ~UV_HANDLE_READING;
return 0;
@ -904,7 +906,7 @@ int uv_connect(uv_req_t* req, struct sockaddr* addr) {
int addrsize;
BOOL success;
DWORD bytes;
uv_handle_t* handle = req->handle;
uv_tcp_t* handle = (uv_tcp_t*)req->handle;
assert(!(req->flags & UV_REQ_PENDING));
@ -965,7 +967,7 @@ static size_t uv_count_bufs(uv_buf_t bufs[], int count) {
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
int result;
DWORD bytes, err;
uv_handle_t* handle = req->handle;
uv_tcp_t* handle = (uv_tcp_t*) req->handle;
assert(!(req->flags & UV_REQ_PENDING));
@ -1016,7 +1018,7 @@ int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt) {
int uv_shutdown(uv_req_t* req) {
uv_handle_t* handle = req->handle;
uv_tcp_t* handle = (uv_tcp_t*) req->handle;
int status = 0;
if (!(req->handle->flags & UV_HANDLE_CONNECTION)) {
@ -1036,13 +1038,13 @@ int uv_shutdown(uv_req_t* req) {
handle->shutdown_req = req;
handle->reqs_pending++;
uv_want_endgame(handle);
uv_want_endgame((uv_handle_t*)handle);
return 0;
}
static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
BOOL success;
DWORD bytes, flags, err;
uv_buf_t buf;
@ -1058,7 +1060,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
handle->write_queue_size -= req->queued_bytes;
if (!success) {
uv_set_sys_error(GetLastError());
uv_close_error(handle, uv_last_error_);
uv_close_error((uv_handle_t*)handle, uv_last_error_);
}
if (req->cb) {
((uv_write_cb)req->cb)(req, success ? 0 : -1);
@ -1067,7 +1069,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
if (success &&
handle->write_reqs_pending == 0 &&
handle->flags & UV_HANDLE_SHUTTING) {
uv_want_endgame(handle);
uv_want_endgame((uv_handle_t*)handle);
}
break;
@ -1075,7 +1077,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
if (!success) {
uv_set_sys_error(GetLastError());
uv_close_error(handle, uv_last_error_);
uv_close_error((uv_handle_t*)handle, uv_last_error_);
}
while (handle->flags & UV_HANDLE_READING) {
buf = uv_alloc_(handle, 65536);
@ -1102,7 +1104,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
((uv_read_cb)handle->read_cb)(handle, -1, buf);
uv_want_endgame(handle);
uv_want_endgame((uv_handle_t*)handle);
break;
}
} else {
@ -1114,7 +1116,7 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
uv_close_error(handle, uv_last_error_);
uv_close_error((uv_handle_t*)handle, uv_last_error_);
}
break;
}
@ -1186,12 +1188,12 @@ static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
/* more pending requests. */
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
uv_want_endgame(handle);
uv_want_endgame((uv_handle_t*)handle);
}
}
static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) {
static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
if (a->due < b->due)
return -1;
if (a->due > b->due)
@ -1204,10 +1206,10 @@ static int uv_timer_compare(uv_handle_t* a, uv_handle_t* b) {
}
RB_GENERATE_STATIC(uv_timer_s, uv_handle_s, tree_entry, uv_timer_compare);
RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
int uv_timer_init(uv_timer_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_TIMER;
handle->close_cb = (void*) close_cb;
handle->data = data;
@ -1222,9 +1224,9 @@ int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
}
int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) {
int uv_timer_start(uv_timer_t* handle, uv_loop_cb timer_cb, int64_t timeout, int64_t repeat) {
if (handle->flags & UV_HANDLE_ACTIVE) {
RB_REMOVE(uv_timer_s, &uv_timers_, handle);
RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
}
handle->timer_cb = (void*) timer_cb;
@ -1232,7 +1234,7 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in
handle->repeat = repeat;
handle->flags |= UV_HANDLE_ACTIVE;
if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
@ -1240,11 +1242,11 @@ int uv_timer_start(uv_handle_t* handle, uv_loop_cb timer_cb, int64_t timeout, in
}
int uv_timer_stop(uv_handle_t* handle) {
int uv_timer_stop(uv_timer_t* handle) {
if (!(handle->flags & UV_HANDLE_ACTIVE))
return 0;
RB_REMOVE(uv_timer_s, &uv_timers_, handle);
RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
@ -1252,7 +1254,7 @@ int uv_timer_stop(uv_handle_t* handle) {
}
int uv_timer_again(uv_handle_t* handle) {
int uv_timer_again(uv_timer_t* handle) {
/* If timer_cb is NULL that means that the timer was never started. */
if (!handle->timer_cb) {
uv_set_sys_error(ERROR_INVALID_DATA);
@ -1260,14 +1262,14 @@ int uv_timer_again(uv_handle_t* handle) {
}
if (handle->flags & UV_HANDLE_ACTIVE) {
RB_REMOVE(uv_timer_s, &uv_timers_, handle);
RB_REMOVE(uv_timer_tree_s, &uv_timers_, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
}
if (handle->repeat) {
handle->due = uv_now_ + handle->repeat;
if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
if (RB_INSERT(uv_timer_tree_s, &uv_timers_, handle) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
@ -1278,13 +1280,13 @@ int uv_timer_again(uv_handle_t* handle) {
}
void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat) {
void uv_timer_set_repeat(uv_timer_t* handle, int64_t repeat) {
assert(handle->type == UV_TIMER);
handle->repeat = repeat;
}
int64_t uv_timer_get_repeat(uv_handle_t* handle) {
int64_t uv_timer_get_repeat(uv_timer_t* handle) {
assert(handle->type == UV_TIMER);
return handle->repeat;
}
@ -1383,57 +1385,57 @@ static void uv_loop_invoke(uv_handle_t* list) {
}
int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
int uv_prepare_init(uv_prepare_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_PREPARE;
return uv_loop_init(handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
int uv_check_init(uv_check_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_CHECK;
return uv_loop_init(handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
int uv_idle_init(uv_idle_t* handle, uv_close_cb close_cb, void* data) {
handle->type = UV_IDLE;
return uv_loop_init(handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
}
int uv_prepare_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
int uv_prepare_start(uv_prepare_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_PREPARE);
return uv_loop_start(handle, loop_cb, &uv_prepare_handles_);
return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_prepare_handles_);
}
int uv_check_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
int uv_check_start(uv_check_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_CHECK);
return uv_loop_start(handle, loop_cb, &uv_check_handles_);
return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_check_handles_);
}
int uv_idle_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
int uv_idle_start(uv_idle_t* handle, uv_loop_cb loop_cb) {
assert(handle->type == UV_IDLE);
return uv_loop_start(handle, loop_cb, &uv_idle_handles_);
return uv_loop_start((uv_handle_t*)handle, loop_cb, &uv_idle_handles_);
}
int uv_prepare_stop(uv_handle_t* handle) {
int uv_prepare_stop(uv_prepare_t* handle) {
assert(handle->type == UV_PREPARE);
return uv_loop_stop(handle, &uv_prepare_handles_);
return uv_loop_stop((uv_handle_t*)handle, &uv_prepare_handles_);
}
int uv_check_stop(uv_handle_t* handle) {
int uv_check_stop(uv_check_t* handle) {
assert(handle->type == UV_CHECK);
return uv_loop_stop(handle, &uv_check_handles_);
return uv_loop_stop((uv_handle_t*)handle, &uv_check_handles_);
}
int uv_idle_stop(uv_handle_t* handle) {
int uv_idle_stop(uv_idle_t* handle) {
assert(handle->type == UV_IDLE);
return uv_loop_stop(handle, &uv_idle_handles_);
return uv_loop_stop((uv_handle_t*)handle, &uv_idle_handles_);
}
@ -1451,7 +1453,7 @@ int uv_is_active(uv_handle_t* handle) {
}
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
int uv_async_init(uv_async_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data) {
uv_req_t* req;
@ -1463,7 +1465,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
handle->error = uv_ok_;
req = &handle->async_req;
uv_req_init(req, handle, async_cb);
uv_req_init(req, (uv_handle_t*)handle, async_cb);
req->type = UV_WAKEUP;
uv_refs_++;
@ -1472,7 +1474,7 @@ int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
}
int uv_async_send(uv_handle_t* handle) {
int uv_async_send(uv_async_t* handle) {
if (handle->type != UV_ASYNC) {
/* Can't set errno because that's not thread-safe. */
return -1;
@ -1495,16 +1497,16 @@ int uv_async_send(uv_handle_t* handle) {
}
static void uv_async_return_req(uv_handle_t* handle, uv_req_t* req) {
static void uv_async_return_req(uv_async_t* handle, uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
handle->async_sent = 0;
if (req->cb) {
((uv_async_cb)req->cb)(handle, 0);
((uv_async_cb)req->cb)((uv_handle_t*)handle, 0);
}
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(handle);
uv_want_endgame((uv_handle_t*)handle);
}
}
@ -1515,6 +1517,7 @@ static void uv_poll() {
ULONG_PTR key;
OVERLAPPED* overlapped;
uv_req_t* req;
uv_timer_t* timer;
uv_handle_t* handle;
DWORD timeout;
int64_t delta;
@ -1530,9 +1533,9 @@ static void uv_poll() {
uv_update_time();
/* Check if there are any running timers */
handle = RB_MIN(uv_timer_s, &uv_timers_);
if (handle) {
delta = handle->due - uv_now_;
timer = RB_MIN(uv_timer_tree_s, &uv_timers_);
if (timer) {
delta = timer->due - uv_now_;
if (delta >= UINT_MAX) {
/* Can't have a timeout greater than UINT_MAX, and a timeout value of */
/* UINT_MAX means infinite, so that's no good either. */
@ -1560,26 +1563,26 @@ static void uv_poll() {
uv_loop_invoke(uv_check_handles_);
/* Call timer callbacks */
for (handle = RB_MIN(uv_timer_s, &uv_timers_);
handle != NULL && handle->due <= uv_now_;
handle = RB_MIN(uv_timer_s, &uv_timers_)) {
RB_REMOVE(uv_timer_s, &uv_timers_, handle);
for (timer = RB_MIN(uv_timer_tree_s, &uv_timers_);
timer != NULL && timer->due <= uv_now_;
timer = RB_MIN(uv_timer_tree_s, &uv_timers_)) {
RB_REMOVE(uv_timer_tree_s, &uv_timers_, timer);
if (handle->repeat != 0) {
if (timer->repeat != 0) {
/* If it is a repeating timer, reschedule with repeat timeout. */
handle->due += handle->repeat;
if (handle->due < uv_now_) {
handle->due = uv_now_;
timer->due += timer->repeat;
if (timer->due < uv_now_) {
timer->due = uv_now_;
}
if (RB_INSERT(uv_timer_s, &uv_timers_, handle) != NULL) {
if (RB_INSERT(uv_timer_tree_s, &uv_timers_, timer) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
} else {
/* If non-repeating, mark the timer as inactive. */
handle->flags &= ~UV_HANDLE_ACTIVE;
timer->flags &= ~UV_HANDLE_ACTIVE;
}
((uv_loop_cb) handle->timer_cb)(handle, 0);
((uv_loop_cb) timer->timer_cb)((uv_handle_t*)timer, 0);
}
/* Only if an iocp package was dequeued... */
@ -1589,11 +1592,11 @@ static void uv_poll() {
switch (handle->type) {
case UV_TCP:
uv_tcp_return_req(handle, req);
uv_tcp_return_req((uv_tcp_t*)handle, req);
break;
case UV_ASYNC:
uv_async_return_req(handle, req);
uv_async_return_req((uv_async_t*)handle, req);
break;
default:


@ -41,7 +41,7 @@ typedef struct uv_buf_t {
char* base;
} uv_buf_t;
#define uv_req_private_fields \
#define UV_REQ_PRIVATE_FIELDS \
union { \
/* Used by I/O operations */ \
struct { \
@ -63,7 +63,7 @@ typedef struct uv_buf_t {
struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
#define uv_tcp_fields \
#define UV_TCP_PRIVATE_FIELDS \
unsigned int reqs_pending; \
union { \
SOCKET socket; \
@ -74,32 +74,36 @@ typedef struct uv_buf_t {
struct { uv_tcp_server_fields }; \
};
#define uv_timer_fields \
RB_ENTRY(uv_handle_s) tree_entry; \
#define UV_TIMER_PRIVATE_FIELDS \
RB_ENTRY(uv_timer_s) tree_entry; \
int64_t due; \
int64_t repeat; \
void* timer_cb;
#define uv_loop_fields \
#define UV_LOOP_PRIVATE_FIELDS \
uv_handle_t* loop_prev; \
uv_handle_t* loop_next; \
void* loop_cb;
#define uv_async_fields \
#define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \
/* char to avoid alignment issues */ \
char volatile async_sent;
#define uv_handle_private_fields \
#define UV_PREPARE_PRIVATE_FIELDS /* empty */
#define UV_CHECK_PRIVATE_FIELDS /* empty */
#define UV_IDLE_PRIVATE_FIELDS /* empty */
/*
* TODO: remove UV_LOOP_PRIVATE_FIELDS from UV_HANDLE_PRIVATE_FIELDS and
* use it in UV_(PREPARE|CHECK|IDLE)_PRIVATE_FIELDS instead.
*/
#define UV_HANDLE_PRIVATE_FIELDS \
uv_handle_t* endgame_next; \
unsigned int flags; \
uv_err_t error; \
union { \
struct { uv_tcp_fields }; \
struct { uv_timer_fields }; \
struct { uv_loop_fields }; \
struct { uv_async_fields }; \
};
UV_LOOP_PRIVATE_FIELDS
int uv_utf16_to_utf8(wchar_t* utf16Buffer, size_t utf16Size, char* utf8Buffer, size_t utf8Size);

uv.h (278 changed lines)

@ -33,6 +33,11 @@ extern "C" {
typedef struct uv_err_s uv_err_t;
typedef struct uv_handle_s uv_handle_t;
typedef struct uv_tcp_s uv_tcp_t;
typedef struct uv_timer_s uv_timer_t;
typedef struct uv_prepare_s uv_prepare_t;
typedef struct uv_check_s uv_check_t;
typedef struct uv_idle_s uv_idle_t;
typedef struct uv_req_s uv_req_t;
@ -51,12 +56,12 @@ typedef struct uv_req_s uv_req_t;
* In the case of uv_read_cb the uv_buf_t returned should be freed by the
* user.
*/
typedef uv_buf_t (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size);
typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf_t buf);
typedef uv_buf_t (*uv_alloc_cb)(uv_tcp_t* tcp, size_t suggested_size);
typedef void (*uv_read_cb)(uv_tcp_t* tcp, int nread, uv_buf_t buf);
typedef void (*uv_write_cb)(uv_req_t* req, int status);
typedef void (*uv_connect_cb)(uv_req_t* req, int status);
typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
typedef void (*uv_accept_cb)(uv_handle_t* handle);
typedef void (*uv_accept_cb)(uv_tcp_t* server);
typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
/* TODO: do loop_cb and async_cb really need a status argument? */
typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
@ -142,70 +147,67 @@ struct uv_req_s {
void* cb;
void* data;
/* private */
uv_req_private_fields
UV_REQ_PRIVATE_FIELDS
};
struct uv_handle_s {
/* read-only */
uv_handle_type type;
/* public */
uv_close_cb close_cb;
void* data;
/* number of bytes queued for writing */
size_t write_queue_size;
/* private */
uv_handle_private_fields
};
/* Most functions return boolean: 0 for success and -1 for failure.
* On error the user should then call uv_last_error() to determine
* the error code.
*/
uv_err_t uv_last_error();
char* uv_strerror(uv_err_t err);
const char* uv_err_name(uv_err_t err);
void uv_init(uv_alloc_cb alloc);
int uv_run();
/* Manually modify the event loop's reference count. Useful if the user wants
* to have a handle or timeout that doesn't keep the loop alive.
*/
void uv_ref();
void uv_unref();
void uv_update_time();
int64_t uv_now();
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
/*
* TODO:
* - uv_(pipe|pipe_tty)_handle_init
* - uv_bind_pipe(char* name)
* - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb)
* - A way to list cancelled uv_reqs after before/on uv_close_cb
* Initialize a request for use with uv_write, uv_shutdown, or uv_connect.
*/
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
/* TCP socket methods.
* Handle and callback must be set by calling uv_req_init.
#define UV_HANDLE_FIELDS \
/* read-only */ \
uv_handle_type type; \
/* public */ \
uv_close_cb close_cb; \
void* data; \
/* private */ \
UV_HANDLE_PRIVATE_FIELDS \
/* The abstract base class of all handles. */
struct uv_handle_s {
UV_HANDLE_FIELDS
};
/*
* Returns 1 if the prepare/check/idle handle has been started, 0 otherwise.
* For other handle types this always returns 1.
*/
int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_bind(uv_handle_t* handle, struct sockaddr* addr);
int uv_is_active(uv_handle_t* handle);
/*
* Request handle to be closed. close_cb will be called asynchronously after
* this call. This MUST be called on each handle before memory is released.
*/
int uv_close(uv_handle_t* handle);
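Since uv_tcp_s (below) and the other handle structs all begin with
UV_HANDLE_FIELDS, a pointer to any concrete handle may be cast to
uv_handle_t* for base-class operations such as uv_close. A sketch:

/* Hypothetical helper: close any handle via its abstract base. */
static void request_close(uv_tcp_t* tcp) {
  /* Valid because uv_tcp_s starts with UV_HANDLE_FIELDS. */
  uv_close((uv_handle_t*)tcp);
}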
/*
* A subclass of uv_handle_t representing a TCP stream or TCP server. In the
* future this will probably be split into two classes - one a stream and
* the other a server.
*/
struct uv_tcp_s {
UV_HANDLE_FIELDS
size_t write_queue_size; /* number of bytes queued for writing */
UV_TCP_PRIVATE_FIELDS
};
int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data);
int uv_bind(uv_tcp_t* handle, struct sockaddr* addr);
int uv_connect(uv_req_t* req, struct sockaddr* addr);
int uv_shutdown(uv_req_t* req);
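The client-side sequence, sketched with hypothetical names (binding before
connecting, and assuming the caller supplies sockaddr_in values, e.g. from
uv_ip4_addr):

static uv_tcp_t client;
static uv_req_t connect_req;

static void client_close_cb(uv_handle_t* handle, int status) {
  /* the handle's memory may be reclaimed from here on */
}

static void client_connect_cb(uv_req_t* req, int status) {
  if (status) {
    uv_close((uv_handle_t*)&client);  /* connect failed; see uv_last_error() */
  }
}

static int client_start(struct sockaddr_in* bind_addr,
                        struct sockaddr_in* server_addr) {
  if (uv_tcp_init(&client, client_close_cb, NULL)) return -1;
  if (uv_bind(&client, (struct sockaddr*)bind_addr)) return -1;
  uv_req_init(&connect_req, (uv_handle_t*)&client, client_connect_cb);
  return uv_connect(&connect_req, (struct sockaddr*)server_addr);
}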
/* TCP server methods. */
int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb);
int uv_listen(uv_tcp_t* handle, int backlog, uv_accept_cb cb);
/* Call this after accept_cb. client does not need to be initialized. */
int uv_accept(uv_handle_t* server, uv_handle_t* client,
int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data);
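And the server side, sketched (hypothetical names; as the comment above
says, the client handle needs no initialization before uv_accept):

static uv_tcp_t tcp_server;
static uv_tcp_t peer;

static void peer_close_cb(uv_handle_t* handle, int status) {
}

static void server_accept_cb(uv_tcp_t* server) {
  if (uv_accept(server, &peer, peer_close_cb, NULL) == 0) {
    uv_read_start(&peer, my_read_cb);  /* my_read_cb from the sketch above */
  }
}

static int server_start(struct sockaddr_in* listen_addr) {
  if (uv_tcp_init(&tcp_server, peer_close_cb, NULL)) return -1;
  if (uv_bind(&tcp_server, (struct sockaddr*)listen_addr)) return -1;
  return uv_listen(&tcp_server, 128, server_accept_cb);
}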
/* Read data from an incoming stream. The callback will be made several
 * times until there is no more data to read or uv_read_stop is
 * called. When we've reached EOF, nread will be set to -1 and the error is
@ -215,77 +217,138 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
* eof; it happens when libuv requested a buffer through the alloc callback
* but then decided that it didn't need that buffer.
*/
int uv_read_start(uv_handle_t* handle, uv_read_cb cb);
int uv_read_stop(uv_handle_t* handle);
int uv_read_start(uv_tcp_t*, uv_read_cb cb);
int uv_read_stop(uv_tcp_t*);
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt);
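uv_write consumes a request whose handle and callback were set by
uv_req_init; a sketch of a heap-allocated one-shot write (hypothetical
names):

#include <stdlib.h>

static void write_done_cb(uv_req_t* req, int status) {
  free(req);  /* one request per in-flight write */
}

static int write_hello(uv_tcp_t* tcp) {
  uv_buf_t buf;
  uv_req_t* req;

  buf.base = "hello\n";
  buf.len = 6;

  req = (uv_req_t*)malloc(sizeof(*req));
  uv_req_init(req, (uv_handle_t*)tcp, write_done_cb);
  if (uv_write(req, &buf, 1)) {
    free(req);
    return -1;
  }
  return 0;
}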
/* Timer methods */
int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout, int64_t repeat);
int uv_timer_stop(uv_handle_t* handle);
/*
* Subclass of uv_handle_t. libev wrapper. Every active prepare handle gets
* its callback called exactly once per loop iteration, just before the
* system blocks to wait for completed i/o.
*/
struct uv_prepare_s {
UV_HANDLE_FIELDS
UV_PREPARE_PRIVATE_FIELDS
};
int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data);
int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb);
int uv_prepare_stop(uv_prepare_t* prepare);
/*
* Subclass of uv_handle_t. libev wrapper. Every active check handle gets
* its callback called exactly once per loop iteration, just after the
* system returns from blocking.
*/
struct uv_check_s {
UV_HANDLE_FIELDS
UV_CHECK_PRIVATE_FIELDS
};
int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data);
int uv_check_start(uv_check_t* check, uv_loop_cb cb);
int uv_check_stop(uv_check_t* check);
/*
* Subclass of uv_handle_t. libev wrapper. Every active idle handle gets its
* callback called repeatedly until it is stopped. This happens after all
* other types of callbacks are processed. When there are multiple "idle"
* handles active, their callbacks are called in turn.
*/
struct uv_idle_s {
UV_HANDLE_FIELDS
UV_IDLE_PRIVATE_FIELDS
};
int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data);
int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb);
int uv_idle_stop(uv_idle_t* idle);
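Prepare, check, and idle share the same init/start/stop shape; a combined
sketch for two of them (hypothetical names; uv_check_* is identical in
form):

static uv_prepare_t prepare_watcher;
static uv_idle_t idle_watcher;

static void watcher_close_cb(uv_handle_t* handle, int status) {
}

static void prepare_cb(uv_handle_t* handle, int status) {
  /* once per iteration, just before the loop blocks for i/o */
}

static void idle_cb(uv_handle_t* handle, int status) {
  uv_idle_stop(&idle_watcher);  /* stop so the loop may block again */
}

static void watchers_start(void) {
  uv_prepare_init(&prepare_watcher, watcher_close_cb, NULL);
  uv_prepare_start(&prepare_watcher, prepare_cb);
  uv_idle_init(&idle_watcher, watcher_close_cb, NULL);
  uv_idle_start(&idle_watcher, idle_cb);
}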
/*
 * Subclass of uv_handle_t. libev wrapper. uv_async_send wakes up the event
 * loop and calls the async handle's callback. There is no guarantee that
 * every uv_async_send call leads to exactly one invocation of the callback;
 * the only guarantee is that the callback function is called at least once
 * after the call to uv_async_send. Unlike all other libuv functions,
 * uv_async_send can be called from another thread.
*/
typedef struct {
UV_HANDLE_FIELDS
UV_ASYNC_PRIVATE_FIELDS
} uv_async_t;
int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_send(uv_async_t* async);
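A cross-thread wakeup sketch (hypothetical names; POSIX threads are used
only for illustration, uv_async_cb is assumed to share uv_loop_cb's
signature, and a NULL close_cb is assumed to be permitted):

#include <pthread.h>

static uv_async_t wakeup;

static void wakeup_cb(uv_handle_t* handle, int status) {
  /* runs on the loop thread, at least once per burst of sends */
}

static void* worker_main(void* arg) {
  uv_async_send(&wakeup);  /* the one call allowed from another thread */
  return NULL;
}

static void wakeup_start(void) {
  pthread_t tid;
  uv_async_init(&wakeup, wakeup_cb, NULL, NULL);
  pthread_create(&tid, NULL, worker_main, NULL);
}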
/*
* Subclass of uv_handle_t. Wraps libev's ev_timer watcher. Used to get
* woken up at a specified time in the future.
*/
struct uv_timer_s {
UV_HANDLE_FIELDS
UV_TIMER_PRIVATE_FIELDS
};
int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data);
int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat);
int uv_timer_stop(uv_timer_t* timer);
/*
 * Stop the timer, and if it is repeating restart it using the repeat value
 * as the timeout. If the timer has never been started before, it returns -1
 * and sets the error to UV_EINVAL.
*/
int uv_timer_again(uv_handle_t* handle);
int uv_timer_again(uv_timer_t* timer);
/*
* Set the repeat value. Note that if the repeat value is set from a timer
* callback it does not immediately take effect. If the timer was nonrepeating
* before, it will have been stopped. If it was repeating, then the old repeat
* value will have been used to schedule the next timeout.
*/
void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat);
int64_t uv_timer_get_repeat(uv_handle_t* handle);
void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat);
/* libev wrapper. Every active prepare handle gets its callback called
* exactly once per loop iteration, just before the system blocks to wait
* for completed i/o.
*/
int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_prepare_stop(uv_handle_t* handle);
int64_t uv_timer_get_repeat(uv_timer_t* timer);
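A repeating-timer sketch (hypothetical names; timeout and repeat are
assumed to be milliseconds):

static uv_timer_t tick_timer;
static int tick_count = 0;

static void timer_close_cb(uv_handle_t* handle, int status) {
}

static void tick_cb(uv_handle_t* handle, int status) {
  if (++tick_count == 5) {
    uv_timer_stop(&tick_timer);
    uv_close((uv_handle_t*)&tick_timer);
  }
}

static void tick_start(void) {
  uv_timer_init(&tick_timer, timer_close_cb, NULL);
  uv_timer_start(&tick_timer, tick_cb, 100, 250);  /* first 100, then 250 */
}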
/* libev wrapper. Every active check handle gets its callback called exactly
* once per loop iteration, just after the system returns from blocking.
*/
int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_check_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_check_stop(uv_handle_t* handle);
/* libev wrapper. Every active idle handle gets its callback called repeatedly until it is
* stopped. This happens after all other types of callbacks are processed.
* When there are multiple "idle" handles active, their callbacks are called
* in turn.
/*
* Most functions return boolean: 0 for success and -1 for failure.
* On error the user should then call uv_last_error() to determine
* the error code.
*/
int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_idle_stop(uv_handle_t* handle);
uv_err_t uv_last_error();
char* uv_strerror(uv_err_t err);
const char* uv_err_name(uv_err_t err);
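The resulting call pattern, sketched (checked_listen is hypothetical):

#include <stdio.h>

static int checked_listen(uv_tcp_t* server, uv_accept_cb cb) {
  if (uv_listen(server, 128, cb)) {
    uv_err_t err = uv_last_error();
    fprintf(stderr, "uv_listen: %s (%s)\n",
            uv_strerror(err), uv_err_name(err));
    return -1;
  }
  return 0;
}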
/* Returns 1 if the prepare/check/idle handle has been started, 0 otherwise.
* For other handle types this always returns 1.
*/
int uv_is_active(uv_handle_t* handle);
void uv_init(uv_alloc_cb alloc);
int uv_run();
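Bootstrapping, sketched (my_alloc_cb from the earlier sketch; uv_run is
assumed to block until no active handles keep the loop alive):

int main(void) {
  uv_init(my_alloc_cb);  /* one global alloc callback for all reads */
  /* ... init and start handles here ... */
  return uv_run();
}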
/* libev wrapper. uv_async_send wakes up the event loop and calls the async
 * handle's callback. There is no guarantee that every uv_async_send call
 * leads to exactly one invocation of the callback; the only guarantee is
 * that the callback function is called at least once after the call to
 * uv_async_send. Unlike everything else, uv_async_send can be called from
 * another thread.
*
* QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my
* side.
/*
* Manually modify the event loop's reference count. Useful if the user wants
* to have a handle or timeout that doesn't keep the loop alive.
*/
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_send(uv_handle_t* handle);
void uv_ref();
void uv_unref();
/* Request handle to be closed. close_cb will be called
* asynchronously after this call.
*/
int uv_close(uv_handle_t* handle);
void uv_update_time();
int64_t uv_now();
/* Utility */
@ -294,6 +357,17 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port);
/* Gets the executable path */
int uv_get_exepath(char* buffer, size_t* size);
/* the presence of this union forces similar struct layout */
union uv_any_handle {
uv_tcp_t tcp;
uv_prepare_t prepare;
uv_check_t check;
uv_idle_t idle;
uv_async_t async;
uv_timer_t timer;
};
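One consequence of the guaranteed common size, sketched (hypothetical
pool):

/* A fixed slab able to hold any handle type, since every member of
 * union uv_any_handle occupies the same storage. */
static union uv_any_handle handle_pool[64];

static uv_tcp_t* pool_tcp(int slot) {
  return &handle_pool[slot].tcp;
}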
#ifdef __cplusplus
}
#endif