diff --git a/LICENSE b/LICENSE
index 74a3424f..3ed5bc5e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,5 @@
-liboio is part of the Node project: http://nodejs.org/
-liboio may be distributed alone under Node's license:
+libuv is part of the Node project: http://nodejs.org/
+libuv may be distributed alone under Node's license:
====
@@ -24,10 +24,10 @@ IN THE SOFTWARE.
====
-This license applies to all parts of liboio that are not externally
+This license applies to all parts of libuv that are not externally
maintained libraries.
-The externally maintained libraries used by liboio are:
+The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
diff --git a/Makefile b/Makefile
index 7f673e1a..a08d89ea 100644
--- a/Makefile
+++ b/Makefile
@@ -26,15 +26,15 @@ else
include config-unix.mk
endif
-all: oio.a test/run-tests test/run-benchmarks
+all: uv.a test/run-tests test/run-benchmarks
-test/run-tests$(E): test/*.h test/run-tests.c $(RUNNER_SRC) test/runner-unix.c $(TESTS) oio.a
+test/run-tests$(E): test/*.h test/run-tests.c $(RUNNER_SRC) test/runner-unix.c $(TESTS) uv.a
$(CC) $(RUNNER_CFLAGS) $(RUNNER_LINKFLAGS) -o test/run-tests test/run-tests.c \
- test/runner.c $(RUNNER_SRC) $(TESTS) oio.a $(RUNNER_LIBS)
+ test/runner.c $(RUNNER_SRC) $(TESTS) uv.a $(RUNNER_LIBS)
-test/run-benchmarks$(E): test/*.h test/run-benchmarks.c test/runner.c $(RUNNER_SRC) $(BENCHMARKS) oio.a
+test/run-benchmarks$(E): test/*.h test/run-benchmarks.c test/runner.c $(RUNNER_SRC) $(BENCHMARKS) uv.a
$(CC) $(RUNNER_CFLAGS) $(RUNNER_LINKFLAGS) -o test/run-benchmarks test/run-benchmarks.c \
- test/runner.c $(RUNNER_SRC) $(BENCHMARKS) oio.a $(RUNNER_LIBS)
+ test/runner.c $(RUNNER_SRC) $(BENCHMARKS) uv.a $(RUNNER_LIBS)
test/echo.o: test/echo.c test/echo.h
$(CC) $(CFLAGS) -c test/echo.c -o test/echo.o
diff --git a/README b/README
index ddf19b80..38bb8a68 100644
--- a/README
+++ b/README
@@ -4,6 +4,8 @@ all platform differences in this library.
http://nodejs.org/
+(This was previously called liboio)
+
## Supported Platforms
Microsoft Windows operating systems since Windows XP sp2. It can be built
@@ -20,7 +22,7 @@ on all operating systems. This is a large undertaking. Some of the API
decisions may seem arbitrary but are actually borne out of various specific
platform issues.
-## oio_read_start(), oio_read_stop()
+## uv_read_start(), uv_read_stop()
Originally we wanted to provide an asynchronous read function that was
similar to WSARecv().
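
Editor's note: the read API this README section refers to keeps the alloc-callback model used throughout the tests in this patch. Below is a minimal sketch of the pattern, assuming only the prototypes visible in this patch (uv_init, uv_read_start, uv_read_stop, uv_close); the names on_alloc/on_read are illustrative and the handle is assumed to be an already-connected stream.

```c
/* Sketch only: callback signatures follow uv.h as of this patch.
 * on_alloc/on_read are hypothetical names; `stream` is assumed to be a
 * uv_handle_t already set up with uv_tcp_init/uv_connect. */
#include <stdlib.h>
#include "uv.h"

static uv_buf on_alloc(uv_handle_t* handle, size_t suggested_size) {
  uv_buf buf;
  buf.base = (char*) malloc(suggested_size);  /* read callback frees this */
  buf.len = suggested_size;
  return buf;
}

static void on_read(uv_handle_t* handle, int nread, uv_buf buf) {
  if (nread < 0) {
    /* EOF or error; per the header comment buf may be empty here. */
    if (buf.base) free(buf.base);
    uv_close(handle);
    return;
  }
  if (nread == 0) {
    /* The loop asked for a buffer but did not need it; not an error. */
    free(buf.base);
    return;
  }
  /* ... consume buf.base[0..nread) ... */
  free(buf.base);
  uv_read_stop(handle);  /* stop when no more data is wanted */
}

/* Reads are started (after uv_init(on_alloc) and a successful connect) with:
 *   uv_read_start(stream, on_read);
 */
```
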
diff --git a/config-mingw.mk b/config-mingw.mk
index 186a51f4..0642960a 100644
--- a/config-mingw.mk
+++ b/config-mingw.mk
@@ -34,11 +34,11 @@ RUNNER_LINKFLAGS=$(LINKFLAGS)
RUNNER_LIBS=-lws2_32
RUNNER_SRC=test/runner-win.c
-oio.a: oio-win.o
- $(AR) rcs oio.a oio-win.o
+uv.a: uv-win.o
+ $(AR) rcs uv.a uv-win.o
-oio-win.o: oio-win.c oio.h oio-win.h
- $(CC) $(CFLAGS) -c oio-win.c -o oio-win.o
+uv-win.o: uv-win.c uv.h uv-win.h
+ $(CC) $(CFLAGS) -c uv-win.c -o uv-win.o
distclean-platform:
clean-platform:
diff --git a/config-unix.mk b/config-unix.mk
index db04815f..6903824e 100644
--- a/config-unix.mk
+++ b/config-unix.mk
@@ -35,11 +35,11 @@ RUNNER_LINKFLAGS=$(LINKFLAGS) -pthread
RUNNER_LIBS=
RUNNER_SRC=test/runner-unix.c
-oio.a: oio-unix.o ev/ev.o
- $(AR) rcs oio.a oio-unix.o ev/ev.o
+uv.a: uv-unix.o ev/ev.o
+ $(AR) rcs uv.a uv-unix.o ev/ev.o
-oio-unix.o: oio-unix.c oio.h oio-unix.h
- $(CC) $(CFLAGS) -c oio-unix.c -o oio-unix.o
+uv-unix.o: uv-unix.c uv.h uv-unix.h
+ $(CC) $(CFLAGS) -c uv-unix.c -o uv-unix.o
ev/ev.o: ev/config.h ev/ev.c
$(MAKE) -C ev
diff --git a/msvs/liboio-benchmark.vcxproj b/msvs/liboio-benchmark.vcxproj
index 888d6f03..4a24462b 100644
--- a/msvs/liboio-benchmark.vcxproj
+++ b/msvs/liboio-benchmark.vcxproj
@@ -162,11 +162,11 @@
-
+
{301fe650-cd34-14e5-6b63-42e383fa02bc}
-
\ No newline at end of file
+
diff --git a/msvs/liboio-test.vcxproj b/msvs/liboio-test.vcxproj
index f4f8ad85..0a8ff79d 100644
--- a/msvs/liboio-test.vcxproj
+++ b/msvs/liboio-test.vcxproj
@@ -171,11 +171,11 @@
-
+
{301fe650-cd34-14e5-6b63-42e383fa02bc}
-
\ No newline at end of file
+
diff --git a/msvs/liboio.sln b/msvs/liboio.sln
index a16ff82c..a1cf5e76 100644
--- a/msvs/liboio.sln
+++ b/msvs/liboio.sln
@@ -1,11 +1,11 @@
Microsoft Visual Studio Solution File, Format Version 11.00
# Visual Studio 2010
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio", "liboio.vcxproj", "{301FE650-CD34-14E5-6B63-42E383FA02BC}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv", "libuv.vcxproj", "{301FE650-CD34-14E5-6B63-42E383FA02BC}"
EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio-test", "liboio-test.vcxproj", "{1D7C3F6C-A4AF-DD73-2D20-B2FC919B3744}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv-test", "libuv-test.vcxproj", "{1D7C3F6C-A4AF-DD73-2D20-B2FC919B3744}"
EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio-benchmark", "liboio-benchmark.vcxproj", "{65312F30-3B19-A87E-E8D1-491D0F4A6536}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv-benchmark", "libuv-benchmark.vcxproj", "{65312F30-3B19-A87E-E8D1-491D0F4A6536}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
diff --git a/msvs/liboio.vcxproj b/msvs/liboio.vcxproj
index 979a661d..3c7f2e1a 100644
--- a/msvs/liboio.vcxproj
+++ b/msvs/liboio.vcxproj
@@ -101,24 +101,24 @@
-
+
true
true
true
true
-
-
+
+
-
+
true
true
true
true
-
+
@@ -130,4 +130,4 @@
-
\ No newline at end of file
+
diff --git a/oio.h b/oio.h
deleted file mode 100644
index 8ea77f24..00000000
--- a/oio.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef OIO_H
-#define OIO_H
-
-#define OIO_VERSION_MAJOR 0
-#define OIO_VERSION_MINOR 1
-
-#include <stdint.h> /* int64_t */
-#include <stddef.h> /* size_t */
-
-typedef struct oio_err_s oio_err_t;
-typedef struct oio_handle_s oio_handle_t;
-typedef struct oio_req_s oio_req_t;
-
-
-#if defined(__unix__) || defined(__POSIX__) || defined(__APPLE__)
-# include "oio-unix.h"
-#else
-# include "oio-win.h"
-#endif
-
-
-/* The status parameter is 0 if the request completed successfully,
- * and should be -1 if the request was cancelled or failed.
- * For oio_close_cb, -1 means that the handle was closed due to an error.
- * Error details can be obtained by calling oio_last_error().
- *
- * In the case of oio_read_cb the oio_buf returned should be freed by the
- * user.
- */
-typedef oio_buf (*oio_alloc_cb)(oio_handle_t* handle, size_t suggested_size);
-typedef void (*oio_read_cb)(oio_handle_t *handle, int nread, oio_buf buf);
-typedef void (*oio_write_cb)(oio_req_t* req, int status);
-typedef void (*oio_connect_cb)(oio_req_t* req, int status);
-typedef void (*oio_shutdown_cb)(oio_req_t* req, int status);
-typedef void (*oio_accept_cb)(oio_handle_t* handle);
-typedef void (*oio_close_cb)(oio_handle_t* handle, int status);
-typedef void (*oio_timer_cb)(oio_req_t* req, int64_t skew, int status);
-/* TODO: do loop_cb and async_cb really need a status argument? */
-typedef void (*oio_loop_cb)(oio_handle_t* handle, int status);
-typedef void (*oio_async_cb)(oio_handle_t* handle, int status);
-
-
-/* Expand this list if necessary. */
-typedef enum {
- OIO_UNKNOWN = -1,
- OIO_OK = 0,
- OIO_EOF,
- OIO_EACCESS,
- OIO_EAGAIN,
- OIO_EADDRINUSE,
- OIO_EADDRNOTAVAIL,
- OIO_EAFNOSUPPORT,
- OIO_EALREADY,
- OIO_EBADF,
- OIO_EBUSY,
- OIO_ECONNABORTED,
- OIO_ECONNREFUSED,
- OIO_ECONNRESET,
- OIO_EDESTADDRREQ,
- OIO_EFAULT,
- OIO_EHOSTUNREACH,
- OIO_EINTR,
- OIO_EINVAL,
- OIO_EISCONN,
- OIO_EMFILE,
- OIO_ENETDOWN,
- OIO_ENETUNREACH,
- OIO_ENFILE,
- OIO_ENOBUFS,
- OIO_ENOMEM,
- OIO_ENONET,
- OIO_ENOPROTOOPT,
- OIO_ENOTCONN,
- OIO_ENOTSOCK,
- OIO_ENOTSUP,
- OIO_EPROTO,
- OIO_EPROTONOSUPPORT,
- OIO_EPROTOTYPE,
- OIO_ETIMEDOUT
-} oio_err_code;
-
-typedef enum {
- OIO_UNKNOWN_HANDLE = 0,
- OIO_TCP,
- OIO_NAMED_PIPE,
- OIO_TTY,
- OIO_FILE,
- OIO_PREPARE,
- OIO_CHECK,
- OIO_IDLE,
- OIO_ASYNC
-} oio_handle_type;
-
-typedef enum {
- OIO_UNKNOWN_REQ = 0,
- OIO_CONNECT,
- OIO_ACCEPT,
- OIO_READ,
- OIO_WRITE,
- OIO_SHUTDOWN,
- OIO_TIMEOUT,
- OIO_WAKEUP
-} oio_req_type;
-
-
-struct oio_err_s {
- /* read-only */
- oio_err_code code;
- /* private */
- int sys_errno_;
-};
-
-
-struct oio_req_s {
- /* read-only */
- oio_req_type type;
- /* public */
- oio_handle_t* handle;
- void* cb;
- void* data;
- /* private */
- oio_req_private_fields
-};
-
-
-struct oio_handle_s {
- /* read-only */
- oio_handle_type type;
- /* public */
- oio_close_cb close_cb;
- void* data;
- /* number of bytes queued for writing */
- size_t write_queue_size;
- /* private */
- oio_handle_private_fields
-};
-
-
-/* Most functions return boolean: 0 for success and -1 for failure.
- * On error the user should then call oio_last_error() to determine
- * the error code.
- */
-oio_err_t oio_last_error();
-char* oio_strerror(oio_err_t err);
-
-
-void oio_init(oio_alloc_cb alloc);
-int oio_run();
-
-/* Manually modify the event loop's reference count. Useful if the user wants
- * to have a handle or timeout that doesn't keep the loop alive.
- */
-void oio_ref();
-void oio_unref();
-
-void oio_update_time();
-int64_t oio_now();
-
-void oio_req_init(oio_req_t* req, oio_handle_t* handle, void* cb);
-
-/*
- * TODO:
- * - oio_(pipe|pipe_tty)_handle_init
- * - oio_bind_pipe(char* name)
- * - oio_continuous_read(oio_handle_t* handle, oio_continuous_read_cb* cb)
- * - A way to list cancelled oio_reqs after before/on oio_close_cb
- */
-
-/* TCP socket methods.
- * Handle and callback must be set by calling oio_req_init.
- */
-int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_bind(oio_handle_t* handle, struct sockaddr* addr);
-
-int oio_connect(oio_req_t* req, struct sockaddr* addr);
-int oio_shutdown(oio_req_t* req);
-
-/* TCP server methods. */
-int oio_listen(oio_handle_t* handle, int backlog, oio_accept_cb cb);
-
-/* Call this after accept_cb. client does not need to be initialized. */
-int oio_accept(oio_handle_t* server, oio_handle_t* client,
- oio_close_cb close_cb, void* data);
-
-
-/* Read data from an incoming stream. The callback will be made several
- * times until there is no more data to read or oio_read_stop is
- * called. When we've reached EOF nread will be set to -1 and the error is
- * set to OIO_EOF. When nread == -1 the buf parameter might not point to a
- * valid buffer; in that case buf.len and buf.base are both set to 0.
- * Note that nread might also be 0, which does *not* indicate an error or
- * eof; it happens when liboio requested a buffer through the alloc callback
- * but then decided that it didn't need that buffer.
- */
-int oio_read_start(oio_handle_t* handle, oio_read_cb cb);
-int oio_read_stop(oio_handle_t* handle);
-
-int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt);
-
-/* Timer methods */
-int oio_timeout(oio_req_t* req, int64_t timeout);
-
-/* libev wrapper. Every active prepare handle gets its callback called
- * exactly once per loop iteration, just before the system blocks to wait
- * for completed i/o.
- */
-int oio_prepare_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_prepare_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_prepare_stop(oio_handle_t* handle);
-
-/* libev wrapper. Every active check handle gets its callback called exactly
- * once per loop iteration, just after the system returns from blocking.
- */
-int oio_check_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_check_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_check_stop(oio_handle_t* handle);
-
-/* libev wrapper. Every active idle handle gets its callback called repeatedly until it is
- * stopped. This happens after all other types of callbacks are processed.
- * When there are multiple "idle" handles active, their callbacks are called
- * in turn.
- */
-int oio_idle_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_idle_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_idle_stop(oio_handle_t* handle);
-
-/* libev wrapper. oio_async_send wakes up the event loop and calls the async
- * handle's callback. There is no guarantee that every oio_async_send call
- * leads to exactly one invocation of the callback; the only guarantee is
- * that the callback function is called at least once after the call to
- * async_send. Unlike everything else, oio_async_send can be called from
- * another thread.
- *
- * QUESTION(ryan) Can OIO_ASYNC just use oio_loop_cb? Same signature on my
- * side.
- */
-int oio_async_init(oio_handle_t* handle, oio_async_cb async_cb,
- oio_close_cb close_cb, void* data);
-int oio_async_send(oio_handle_t* handle);
-
-/* Request handle to be closed. close_cb will be called
- * asynchronously after this call.
- */
-int oio_close(oio_handle_t* handle);
-
-
-/* Utility */
-struct sockaddr_in oio_ip4_addr(char* ip, int port);
-
-#endif /* OIO_H */
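
Editor's note: the header deleted above documents the library's error convention (most functions return 0 on success and -1 on failure, with details available via the last-error call). A small hedged sketch of that convention under the renamed API follows; the `fatal` helper, the port number, and the fprintf/abort handling are illustrative, not part of this patch, and uv_init() is assumed to have been called already.

```c
/* Sketch of the 0 / -1 + uv_last_error() convention documented above.
 * Only functions visible in this patch are used; the error reporting
 * itself is illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include "uv.h"

static void fatal(const char* what) {
  uv_err_t err = uv_last_error();  /* details for the most recent failure */
  fprintf(stderr, "%s: %s\n", what, uv_strerror(err));
  abort();
}

static void on_close(uv_handle_t* handle, int status) { (void) handle; (void) status; }

int bind_example(uv_handle_t* server) {
  /* Assumes uv_init(alloc_cb) was called earlier; port 8000 is arbitrary. */
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 8000);

  if (uv_tcp_init(server, on_close, NULL))  /* 0 on success, -1 on error */
    fatal("uv_tcp_init");
  if (uv_bind(server, (struct sockaddr*) &addr))
    fatal("uv_bind");
  return 0;
}
```
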
diff --git a/test/benchmark-ping-pongs.c b/test/benchmark-ping-pongs.c
index 0b4d9e9c..7731c330 100644
--- a/test/benchmark-ping-pongs.c
+++ b/test/benchmark-ping-pongs.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
@@ -33,13 +33,13 @@
typedef struct {
int pongs;
int state;
- oio_handle_t handle;
- oio_req_t connect_req;
- oio_req_t shutdown_req;
+ uv_handle_t handle;
+ uv_req_t connect_req;
+ uv_req_t shutdown_req;
} pinger_t;
typedef struct buf_s {
- oio_buf oio_buf;
+ uv_buf uv_buf;
struct buf_s* next;
} buf_t;
@@ -52,33 +52,33 @@ static int completed_pingers = 0;
static int64_t start_time;
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size) {
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size) {
buf_t* ab;
ab = buf_freelist;
if (ab != NULL) {
buf_freelist = ab->next;
- return ab->oio_buf;
+ return ab->uv_buf;
}
ab = (buf_t*) malloc(size + sizeof *ab);
- ab->oio_buf.len = size;
- ab->oio_buf.base = ((char*) ab) + sizeof *ab;
+ ab->uv_buf.len = size;
+ ab->uv_buf.base = ((char*) ab) + sizeof *ab;
- return ab->oio_buf;
+ return ab->uv_buf;
}
-static void buf_free(oio_buf oio_buf) {
- buf_t* ab = (buf_t*) (oio_buf.base - sizeof *ab);
+static void buf_free(uv_buf uv_buf) {
+ buf_t* ab = (buf_t*) (uv_buf.base - sizeof *ab);
ab->next = buf_freelist;
buf_freelist = ab;
}
-static void pinger_close_cb(oio_handle_t* handle, int status) {
+static void pinger_close_cb(uv_handle_t* handle, int status) {
pinger_t* pinger;
ASSERT(status == 0);
@@ -92,7 +92,7 @@ static void pinger_close_cb(oio_handle_t* handle, int status) {
}
-static void pinger_write_cb(oio_req_t *req, int status) {
+static void pinger_write_cb(uv_req_t *req, int status) {
ASSERT(status == 0);
free(req);
@@ -100,34 +100,34 @@ static void pinger_write_cb(oio_req_t *req, int status) {
static void pinger_write_ping(pinger_t* pinger) {
- oio_req_t *req;
- oio_buf buf;
+ uv_req_t *req;
+ uv_buf buf;
buf.base = (char*)&PING;
buf.len = strlen(PING);
- req = (oio_req_t*)malloc(sizeof(*req));
- oio_req_init(req, &pinger->handle, pinger_write_cb);
+ req = (uv_req_t*)malloc(sizeof(*req));
+ uv_req_init(req, &pinger->handle, pinger_write_cb);
- if (oio_write(req, &buf, 1)) {
- FATAL("oio_write failed");
+ if (uv_write(req, &buf, 1)) {
+ FATAL("uv_write failed");
}
}
-static void pinger_shutdown_cb(oio_handle_t* handle, int status) {
+static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
}
-static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
unsigned int i;
pinger_t* pinger;
pinger = (pinger_t*)handle->data;
if (nread < 0) {
- ASSERT(oio_last_error().code == OIO_EOF);
+ ASSERT(uv_last_error().code == UV_EOF);
if (buf.base) {
buf_free(buf);
@@ -142,9 +142,9 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
if (pinger->state == 0) {
pinger->pongs++;
- if (oio_now() - start_time > TIME) {
- oio_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
- oio_shutdown(&pinger->shutdown_req);
+ if (uv_now() - start_time > TIME) {
+ uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
+ uv_shutdown(&pinger->shutdown_req);
break;
return;
} else {
@@ -157,23 +157,23 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
}
-static void pinger_connect_cb(oio_req_t *req, int status) {
+static void pinger_connect_cb(uv_req_t *req, int status) {
pinger_t *pinger = (pinger_t*)req->handle->data;
ASSERT(status == 0);
pinger_write_ping(pinger);
- if (oio_read_start(req->handle, pinger_read_cb)) {
- FATAL("oio_read_start failed");
+ if (uv_read_start(req->handle, pinger_read_cb)) {
+ FATAL("uv_read_start failed");
}
}
static void pinger_new() {
int r;
- struct sockaddr_in client_addr = oio_ip4_addr("0.0.0.0", 0);
- struct sockaddr_in server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
+ struct sockaddr_in client_addr = uv_ip4_addr("0.0.0.0", 0);
+ struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
pinger_t *pinger;
pinger = (pinger_t*)malloc(sizeof(*pinger));
@@ -181,25 +181,25 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
- r = oio_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
+ r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
- oio_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
+ uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
- oio_bind(&pinger->handle, (struct sockaddr*)&client_addr);
- r = oio_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
+ uv_bind(&pinger->handle, (struct sockaddr*)&client_addr);
+ r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
ASSERT(!r);
}
BENCHMARK_IMPL(ping_pongs) {
- oio_init(buf_alloc);
- start_time = oio_now();
+ uv_init(buf_alloc);
+ start_time = uv_now();
pinger_new();
- oio_run();
+ uv_run();
ASSERT(completed_pingers == 1);
diff --git a/test/benchmark-pump.c b/test/benchmark-pump.c
index 92524b20..004c114d 100644
--- a/test/benchmark-pump.c
+++ b/test/benchmark-pump.c
@@ -20,7 +20,7 @@
*/
#include "task.h"
-#include "../oio.h"
+#include "../uv.h"
#include
#include
@@ -35,14 +35,14 @@
#define STATS_COUNT 5
-static void do_write(oio_handle_t* handle);
+static void do_write(uv_handle_t* handle);
static void maybe_connect_some();
-static oio_req_t* req_alloc();
-static void req_free(oio_req_t* oio_req);
+static uv_req_t* req_alloc();
+static void req_free(uv_req_t* uv_req);
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size);
-static void buf_free(oio_buf oio_buf);
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size);
+static void buf_free(uv_buf uv_buf);
static struct sockaddr_in server_addr;
@@ -62,8 +62,8 @@ static int stats_left = 0;
static char write_buffer[WRITE_BUFFER_SIZE];
-static oio_handle_t read_handles[TARGET_CONNECTIONS];
-static oio_handle_t write_handles[TARGET_CONNECTIONS];
+static uv_handle_t read_handles[TARGET_CONNECTIONS];
+static uv_handle_t write_handles[TARGET_CONNECTIONS];
static double gbit(int64_t bytes, int64_t passed_ms) {
@@ -72,7 +72,7 @@ static double gbit(int64_t bytes, int64_t passed_ms) {
}
-static void show_stats(oio_req_t *req, int64_t skew, int status) {
+static void show_stats(uv_req_t *req, int64_t skew, int status) {
int64_t msec = STATS_INTERVAL + skew;
#if PRINT_STATS
@@ -98,30 +98,30 @@ static void show_stats(oio_req_t *req, int64_t skew, int status) {
nrecv = 0;
nsent = 0;
- oio_timeout(req, (STATS_INTERVAL - skew > 0)
+ uv_timeout(req, (STATS_INTERVAL - skew > 0)
? STATS_INTERVAL - skew
: 0);
}
static void start_stats_collection() {
- oio_req_t* req = req_alloc();
+ uv_req_t* req = req_alloc();
int r;
/* Show-stats timeout */
stats_left = STATS_COUNT;
- oio_req_init(req, NULL, (void*)show_stats);
- r = oio_timeout(req, STATS_INTERVAL);
+ uv_req_init(req, NULL, (void*)show_stats);
+ r = uv_timeout(req, STATS_INTERVAL);
ASSERT(r == 0);
}
-void close_cb(oio_handle_t* handle, int status) {
+void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
}
-static void read_cb(oio_handle_t* handle, int bytes, oio_buf buf) {
+static void read_cb(uv_handle_t* handle, int bytes, uv_buf buf) {
ASSERT(bytes >= 0);
buf_free(buf);
@@ -131,8 +131,8 @@ static void read_cb(oio_handle_t* handle, int bytes, oio_buf buf) {
}
-static void write_cb(oio_req_t *req, int status) {
- oio_buf* buf = (oio_buf*) req->data;
+static void write_cb(uv_req_t *req, int status) {
+ uv_buf* buf = (uv_buf*) req->data;
ASSERT(status == 0);
@@ -145,9 +145,9 @@ static void write_cb(oio_req_t *req, int status) {
}
-static void do_write(oio_handle_t* handle) {
- oio_req_t* req;
- oio_buf buf;
+static void do_write(uv_handle_t* handle) {
+ uv_req_t* req;
+ uv_buf buf;
int r;
buf.base = (char*) &write_buffer;
@@ -155,9 +155,9 @@ static void do_write(oio_handle_t* handle) {
while (handle->write_queue_size == 0) {
req = req_alloc();
- oio_req_init(req, handle, write_cb);
+ uv_req_init(req, handle, write_cb);
- r = oio_write(req, &buf, 1);
+ r = uv_write(req, &buf, 1);
ASSERT(r == 0);
}
}
@@ -177,8 +177,8 @@ static void maybe_start_writing() {
}
-static void connect_cb(oio_req_t* req, int status) {
- if (status) LOG(oio_strerror(oio_last_error()));
+static void connect_cb(uv_req_t* req, int status) {
+ if (status) LOG(uv_strerror(uv_last_error()));
ASSERT(status == 0);
write_sockets++;
@@ -189,16 +189,16 @@ static void connect_cb(oio_req_t* req, int status) {
}
-static void do_connect(oio_handle_t* handle, struct sockaddr* addr) {
- oio_req_t* req;
+static void do_connect(uv_handle_t* handle, struct sockaddr* addr) {
+ uv_req_t* req;
int r;
- r = oio_tcp_init(handle, close_cb, NULL);
+ r = uv_tcp_init(handle, close_cb, NULL);
ASSERT(r == 0);
req = req_alloc();
- oio_req_init(req, handle, connect_cb);
- r = oio_connect(req, addr);
+ uv_req_init(req, handle, connect_cb);
+ r = uv_connect(req, addr);
ASSERT(r == 0);
}
@@ -212,17 +212,17 @@ static void maybe_connect_some() {
}
-static void accept_cb(oio_handle_t* server) {
- oio_handle_t* handle;
+static void accept_cb(uv_handle_t* server) {
+ uv_handle_t* handle;
int r;
ASSERT(read_sockets < TARGET_CONNECTIONS);
handle = &read_handles[read_sockets];
- r = oio_accept(server, handle, close_cb, NULL);
+ r = uv_accept(server, handle, close_cb, NULL);
ASSERT(r == 0);
- r = oio_read_start(handle, read_cb);
+ r = uv_read_start(handle, read_cb);
ASSERT(r == 0);
read_sockets++;
@@ -232,18 +232,18 @@ static void accept_cb(oio_handle_t* server) {
BENCHMARK_IMPL(pump) {
- oio_handle_t server;
+ uv_handle_t server;
int r;
- oio_init(buf_alloc);
+ uv_init(buf_alloc);
/* Server */
- server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
- r = oio_tcp_init(&server, close_cb, NULL);
+ server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+ r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &server_addr);
+ r = uv_bind(&server, (struct sockaddr*) &server_addr);
ASSERT(r == 0);
- r = oio_listen(&server, TARGET_CONNECTIONS, accept_cb);
+ r = uv_listen(&server, TARGET_CONNECTIONS, accept_cb);
ASSERT(r == 0);
-  oio_update_time();
+  uv_update_time();
@@ -252,7 +252,7 @@ BENCHMARK_IMPL(pump) {
/* Start making connections */
maybe_connect_some();
- oio_run();
+ uv_run();
return 0;
}
@@ -263,7 +263,7 @@ BENCHMARK_IMPL(pump) {
*/
typedef struct req_list_s {
- oio_req_t oio_req;
+ uv_req_t uv_req;
struct req_list_s* next;
} req_list_t;
@@ -271,22 +271,22 @@ typedef struct req_list_s {
static req_list_t* req_freelist = NULL;
-static oio_req_t* req_alloc() {
+static uv_req_t* req_alloc() {
req_list_t* req;
req = req_freelist;
if (req != NULL) {
req_freelist = req->next;
- return (oio_req_t*) req;
+ return (uv_req_t*) req;
}
req = (req_list_t*) malloc(sizeof *req);
- return (oio_req_t*) req;
+ return (uv_req_t*) req;
}
-static void req_free(oio_req_t* oio_req) {
- req_list_t* req = (req_list_t*) oio_req;
+static void req_free(uv_req_t* uv_req) {
+ req_list_t* req = (req_list_t*) uv_req;
req->next = req_freelist;
req_freelist = req;
@@ -298,7 +298,7 @@ static void req_free(oio_req_t* oio_req) {
*/
typedef struct buf_list_s {
- oio_buf oio_buf;
+ uv_buf uv_buf;
struct buf_list_s* next;
} buf_list_t;
@@ -306,25 +306,25 @@ typedef struct buf_list_s {
static buf_list_t* buf_freelist = NULL;
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size) {
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size) {
buf_list_t* buf;
buf = buf_freelist;
if (buf != NULL) {
buf_freelist = buf->next;
- return buf->oio_buf;
+ return buf->uv_buf;
}
buf = (buf_list_t*) malloc(size + sizeof *buf);
- buf->oio_buf.len = (unsigned int)size;
- buf->oio_buf.base = ((char*) buf) + sizeof *buf;
+ buf->uv_buf.len = (unsigned int)size;
+ buf->uv_buf.base = ((char*) buf) + sizeof *buf;
- return buf->oio_buf;
+ return buf->uv_buf;
}
-static void buf_free(oio_buf oio_buf) {
- buf_list_t* buf = (buf_list_t*) (oio_buf.base - sizeof *buf);
+static void buf_free(uv_buf uv_buf) {
+ buf_list_t* buf = (buf_list_t*) (uv_buf.base - sizeof *buf);
buf->next = buf_freelist;
buf_freelist = buf;
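
Editor's note: show_stats in the pump benchmark above re-arms a one-shot uv_timeout and subtracts the reported skew to keep a roughly fixed period. Below is a hedged distillation of that pattern; the 1000 ms period, the tick counter, and the callback name are illustrative.

```c
/* Sketch: periodic ticks built from the one-shot uv_timeout() in this patch,
 * compensating for skew the way show_stats does. Names and period are
 * illustrative. */
#include <stdio.h>
#include "uv.h"

#define PERIOD 1000  /* ms */

static uv_req_t tick_req;
static int ticks_left = 5;

static void tick_cb(uv_req_t* req, int64_t skew, int status) {
  int64_t next;
  (void) status;
  printf("tick (skew %lld ms)\n", (long long) skew);

  if (--ticks_left <= 0) return;  /* stop re-arming */

  /* Schedule the next tick PERIOD ms from the ideal time, not from now. */
  next = PERIOD - skew;
  uv_timeout(req, next > 0 ? next : 0);
}

/* Before uv_run():
 *   uv_req_init(&tick_req, NULL, (void*) tick_cb);
 *   uv_timeout(&tick_req, PERIOD);
 */
```
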
diff --git a/test/benchmark-sizes.c b/test/benchmark-sizes.c
index 6d4d5386..cc8f7d43 100644
--- a/test/benchmark-sizes.c
+++ b/test/benchmark-sizes.c
@@ -20,11 +20,11 @@
*/
#include "task.h"
-#include "../oio.h"
+#include "../uv.h"
BENCHMARK_IMPL(sizes) {
- LOGF("oio_handle_t: %lu bytes\n", sizeof(oio_handle_t));
- LOGF("oio_req_t: %lu bytes\n", sizeof(oio_req_t));
+ LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
+ LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
return 0;
}
diff --git a/test/echo-server.c b/test/echo-server.c
index 38cfe95c..32a415c8 100644
--- a/test/echo-server.c
+++ b/test/echo-server.c
@@ -19,33 +19,33 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
typedef struct {
- oio_req_t req;
- oio_buf buf;
+ uv_req_t req;
+ uv_buf buf;
} write_req_t;
-static oio_handle_t server;
+static uv_handle_t server;
-static void after_write(oio_req_t* req, int status);
-static void after_read(oio_handle_t* handle, int nread, oio_buf buf);
-static void on_close(oio_handle_t* peer, int status);
-static void on_accept(oio_handle_t* handle);
+static void after_write(uv_req_t* req, int status);
+static void after_read(uv_handle_t* handle, int nread, uv_buf buf);
+static void on_close(uv_handle_t* peer, int status);
+static void on_accept(uv_handle_t* handle);
-static void after_write(oio_req_t* req, int status) {
+static void after_write(uv_req_t* req, int status) {
write_req_t* wr;
if (status) {
- oio_err_t err = oio_last_error();
- fprintf(stderr, "oio_write error: %s\n", oio_strerror(err));
+ uv_err_t err = uv_last_error();
+ fprintf(stderr, "uv_write error: %s\n", uv_strerror(err));
ASSERT(0);
}
@@ -57,26 +57,26 @@ static void after_write(oio_req_t* req, int status) {
}
-static void after_shutdown(oio_req_t* req, int status) {
+static void after_shutdown(uv_req_t* req, int status) {
free(req);
}
-static void after_read(oio_handle_t* handle, int nread, oio_buf buf) {
+static void after_read(uv_handle_t* handle, int nread, uv_buf buf) {
write_req_t *wr;
- oio_req_t* req;
+ uv_req_t* req;
if (nread < 0) {
/* Error or EOF */
- ASSERT (oio_last_error().code == OIO_EOF);
+ ASSERT (uv_last_error().code == UV_EOF);
if (buf.base) {
free(buf.base);
}
- req = (oio_req_t*) malloc(sizeof *req);
- oio_req_init(req, handle, after_shutdown);
- oio_shutdown(req);
+ req = (uv_req_t*) malloc(sizeof *req);
+ uv_req_init(req, handle, after_shutdown);
+ uv_shutdown(req);
return;
}
@@ -89,58 +89,58 @@ static void after_read(oio_handle_t* handle, int nread, oio_buf buf) {
wr = (write_req_t*) malloc(sizeof *wr);
- oio_req_init(&wr->req, handle, after_write);
+ uv_req_init(&wr->req, handle, after_write);
wr->buf.base = buf.base;
wr->buf.len = nread;
- if (oio_write(&wr->req, &wr->buf, 1)) {
- FATAL("oio_write failed");
+ if (uv_write(&wr->req, &wr->buf, 1)) {
+ FATAL("uv_write failed");
}
}
-static void on_close(oio_handle_t* peer, int status) {
+static void on_close(uv_handle_t* peer, int status) {
if (status != 0) {
fprintf(stdout, "Socket error\n");
}
}
-static void on_accept(oio_handle_t* server) {
- oio_handle_t* handle = (oio_handle_t*) malloc(sizeof *handle);
+static void on_accept(uv_handle_t* server) {
+ uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle);
- if (oio_accept(server, handle, on_close, NULL)) {
- FATAL("oio_accept failed");
+ if (uv_accept(server, handle, on_close, NULL)) {
+ FATAL("uv_accept failed");
}
- oio_read_start(handle, after_read);
+ uv_read_start(handle, after_read);
}
-static void on_server_close(oio_handle_t* handle, int status) {
+static void on_server_close(uv_handle_t* handle, int status) {
ASSERT(handle == &server);
ASSERT(status == 0);
}
static int echo_start(int port) {
- struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", port);
+ struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", port);
int r;
- r = oio_tcp_init(&server, on_server_close, NULL);
+ r = uv_tcp_init(&server, on_server_close, NULL);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Socket creation error\n");
return 1;
}
- r = oio_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, (struct sockaddr*) &addr);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Bind error\n");
return 1;
}
- r = oio_listen(&server, 128, on_accept);
+ r = uv_listen(&server, 128, on_accept);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Listen error\n");
@@ -152,12 +152,12 @@ static int echo_start(int port) {
static int echo_stop() {
- return oio_close(&server);
+ return uv_close(&server);
}
-static oio_buf echo_alloc(oio_handle_t* handle, size_t suggested_size) {
- oio_buf buf;
+static uv_buf echo_alloc(uv_handle_t* handle, size_t suggested_size) {
+ uv_buf buf;
buf.base = (char*) malloc(suggested_size);
buf.len = suggested_size;
return buf;
@@ -165,11 +165,11 @@ static oio_buf echo_alloc(oio_handle_t* handle, size_t suggested_size) {
HELPER_IMPL(echo_server) {
- oio_init(echo_alloc);
+ uv_init(echo_alloc);
if (echo_start(TEST_PORT))
return 1;
fprintf(stderr, "Listening!\n");
- oio_run();
+ uv_run();
return 0;
}
diff --git a/test/runner-unix.c b/test/runner-unix.c
index 82aaffb3..7af10c45 100644
--- a/test/runner-unix.c
+++ b/test/runner-unix.c
@@ -309,12 +309,12 @@ void rewind_cursor() {
}
-typedef void* (*oio_thread_cb)(void* arg);
+typedef void* (*uv_thread_cb)(void* arg);
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg) {
pthread_t t;
- oio_thread_cb cb = (oio_thread_cb)entry;
+ uv_thread_cb cb = (uv_thread_cb)entry;
int r = pthread_create(&t, NULL, cb, arg);
if (r) {
@@ -328,12 +328,12 @@ uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
/* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
* error.
*/
-int oio_wait_thread(uintptr_t thread_id) {
+int uv_wait_thread(uintptr_t thread_id) {
return pthread_join((pthread_t)thread_id, NULL);
}
/* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec) {
+void uv_sleep(int msec) {
usleep(msec);
}
diff --git a/test/runner-win.c b/test/runner-win.c
index ab111bc9..a91b7d5f 100644
--- a/test/runner-win.c
+++ b/test/runner-win.c
@@ -62,7 +62,7 @@ int process_start(char *name, process_info_t *p) {
if (GetTempPathW(sizeof(path) / sizeof(WCHAR), (WCHAR*)&path) == 0)
goto error;
- if (GetTempFileNameW((WCHAR*)&path, L"oio", 0, (WCHAR*)&filename) == 0)
+ if (GetTempFileNameW((WCHAR*)&path, L"uv", 0, (WCHAR*)&filename) == 0)
goto error;
file = CreateFileW((WCHAR*)filename,
@@ -283,7 +283,7 @@ static unsigned __stdcall create_thread_helper(void* info) {
/* Create a thread. Returns the thread identifier, or 0 on failure. */
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg) {
uintptr_t result;
thread_info_t* info;
@@ -314,7 +314,7 @@ uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
/* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
* error.
*/
-int oio_wait_thread(uintptr_t thread_id) {
+int uv_wait_thread(uintptr_t thread_id) {
if (WaitForSingleObject((HANDLE)thread_id, INFINITE) != WAIT_OBJECT_0) {
return -1;
}
@@ -324,6 +324,6 @@ int oio_wait_thread(uintptr_t thread_id) {
/* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec) {
+void uv_sleep(int msec) {
Sleep(msec);
}
diff --git a/test/task.h b/test/task.h
index a190f324..50c37840 100644
--- a/test/task.h
+++ b/test/task.h
@@ -76,14 +76,14 @@
/* Create a thread. Returns the thread identifier, or 0 on failure. */
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg);
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg);
/* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
* error.
*/
-int oio_wait_thread(uintptr_t thread_id);
+int uv_wait_thread(uintptr_t thread_id);
/* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec);
+void uv_sleep(int msec);
#endif /* TASK_H_ */
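
Editor's note: task.h above declares the cross-platform thread helpers the test runner uses. A tiny hedged sketch of using them together, based only on the declarations shown; `worker` and `run_worker_once` are illustrative names.

```c
/* Sketch: the test-runner thread helpers declared in task.h, used together. */
#include <stdint.h>
#include "task.h"

static void worker(void* arg) {
  (void) arg;
  uv_sleep(100);  /* pause this thread for 100 ms */
}

int run_worker_once(void) {
  uintptr_t tid = uv_create_thread(worker, NULL);
  if (tid == 0) return -1;     /* 0 means thread creation failed */
  return uv_wait_thread(tid);  /* 0 once the thread has ended */
}
```
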
diff --git a/test/test-async.c b/test/test-async.c
index 4f1a43b3..d43fdcca 100644
--- a/test/test-async.c
+++ b/test/test-async.c
@@ -19,16 +19,16 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
-static oio_handle_t prepare_handle;
+static uv_handle_t prepare_handle;
-static oio_handle_t async1_handle;
-/* static oio_handle_t async2_handle; */
+static uv_handle_t async1_handle;
+/* static uv_handle_t async2_handle; */
static int prepare_cb_called = 0;
@@ -49,20 +49,20 @@ static uintptr_t thread3_id = 0;
void thread1_entry(void *arg) {
int state = 0;
- oio_sleep(50);
+ uv_sleep(50);
while (1) {
switch (async1_cb_called) {
case 0:
- oio_async_send(&async1_handle);
+ uv_async_send(&async1_handle);
break;
case 1:
- oio_async_send(&async1_handle);
+ uv_async_send(&async1_handle);
break;
case 2:
- oio_async_send(&async1_handle);
+ uv_async_send(&async1_handle);
break;
default:
@@ -72,47 +72,47 @@ void thread1_entry(void *arg) {
}
#if 0
-/* Thread 2 calls oio_async_send on async_handle_2 8 times. */
+/* Thread 2 calls uv_async_send on async_handle_2 8 times. */
void thread2_entry(void *arg) {
int i;
while (1) {
switch (async1_cb_called) {
case 0:
- oio_async_send(&async2_handle);
+ uv_async_send(&async2_handle);
break;
case 1:
- oio_async_send(&async2_handle);
+ uv_async_send(&async2_handle);
break;
case 2:
- oio_async_send(&async2_handle);
+ uv_async_send(&async2_handle);
break;
}
- oio_sleep(5);
+ uv_sleep(5);
}
if (async1_cb_called == 20) {
- oio_close(handle);
+ uv_close(handle);
}
}
-/* Thread 3 calls oio_async_send on async_handle_2 8 times
+/* Thread 3 calls uv_async_send on async_handle_2 8 times
* after waiting half a second first.
*/
void thread3_entry(void *arg) {
int i;
for (i = 0; i < 8; i++) {
- oio_async_send(&async2_handle);
+ uv_async_send(&async2_handle);
}
}
#endif
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -120,14 +120,14 @@ static void close_cb(oio_handle_t* handle, int status) {
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
-static void async1_cb(oio_handle_t* handle, int status) {
+static void async1_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &async1_handle);
ASSERT(status == 0);
@@ -136,13 +136,13 @@ static void async1_cb(oio_handle_t* handle, int status) {
if (async1_cb_called > 2 && !async1_closed) {
async1_closed = 1;
- oio_close(handle);
+ uv_close(handle);
}
}
#if 0
-static void async2_cb(oio_handle_t* handle, int status) {
+static void async2_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &async2_handle);
ASSERT(status == 0);
@@ -150,13 +150,13 @@ static void async2_cb(oio_handle_t* handle, int status) {
printf("async2_cb #%d\n", async2_cb_called);
if (async2_cb_called == 16) {
- oio_close(handle);
+ uv_close(handle);
}
}
#endif
-static void prepare_cb(oio_handle_t* handle, int status) {
+static void prepare_cb(uv_handle_t* handle, int status) {
int r;
ASSERT(handle == &prepare_handle);
@@ -164,24 +164,24 @@ static void prepare_cb(oio_handle_t* handle, int status) {
switch (prepare_cb_called) {
case 0:
- thread1_id = oio_create_thread(thread1_entry, NULL);
+ thread1_id = uv_create_thread(thread1_entry, NULL);
ASSERT(thread1_id != 0);
break;
#if 0
case 1:
- thread2_id = oio_create_thread(thread2_entry, NULL);
+ thread2_id = uv_create_thread(thread2_entry, NULL);
ASSERT(thread2_id != 0);
break;
case 2:
- thread3_id = oio_create_thread(thread3_entry, NULL);
+ thread3_id = uv_create_thread(thread3_entry, NULL);
ASSERT(thread3_id != 0);
break;
#endif
case 1:
- r = oio_close(handle);
+ r = uv_close(handle);
ASSERT(r == 0);
break;
@@ -196,30 +196,30 @@ static void prepare_cb(oio_handle_t* handle, int status) {
TEST_IMPL(async) {
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_prepare_init(&prepare_handle, close_cb, NULL);
+ r = uv_prepare_init(&prepare_handle, close_cb, NULL);
ASSERT(r == 0);
- r = oio_prepare_start(&prepare_handle, prepare_cb);
+ r = uv_prepare_start(&prepare_handle, prepare_cb);
ASSERT(r == 0);
- r = oio_async_init(&async1_handle, async1_cb, close_cb, NULL);
+ r = uv_async_init(&async1_handle, async1_cb, close_cb, NULL);
ASSERT(r == 0);
#if 0
- r = oio_async_init(&async2_handle, async2_cb, close_cb, NULL);
+ r = uv_async_init(&async2_handle, async2_cb, close_cb, NULL);
ASSERT(r == 0);
#endif
- r = oio_run();
+ r = uv_run();
ASSERT(r == 0);
- r = oio_wait_thread(thread1_id);
+ r = uv_wait_thread(thread1_id);
ASSERT(r == 0);
#if 0
- r = oio_wait_thread(thread2_id);
+ r = uv_wait_thread(thread2_id);
ASSERT(r == 0);
- r = oio_wait_thread(thread3_id);
+ r = uv_wait_thread(thread3_id);
ASSERT(r == 0);
#endif
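
Editor's note: the async test above relies on the guarantee documented in the header: uv_async_send may coalesce, so the callback runs at least once after each send but not necessarily once per send. Below is a hedged sketch of a coalescing-tolerant pattern; the pending counter and helper names are illustrative, and only uv_async_init/uv_async_send/uv_close are real API from this patch.

```c
/* Sketch: tolerate coalesced uv_async_send() wakeups by draining a counter
 * in the callback instead of assuming one callback per send. A real program
 * would guard `pending` with atomics or a lock, which this patch does not
 * provide. */
#include <stdio.h>
#include "uv.h"

static uv_handle_t wakeup_handle;
static volatile int pending = 0;

static void wakeup_close_cb(uv_handle_t* handle, int status) { (void) handle; (void) status; }

static void wakeup_cb(uv_handle_t* handle, int status) {
  (void) handle; (void) status;
  /* May be reached once for several sends; drain everything queued. */
  while (pending > 0) {
    pending--;
    printf("handled one wakeup\n");
  }
}

/* Called from another thread; uv_async_send is the only call in this patch
 * documented as safe to make off the loop thread. */
void notify_loop(void) {
  pending++;
  uv_async_send(&wakeup_handle);
}

/* On the loop thread, before uv_run():
 *   uv_async_init(&wakeup_handle, wakeup_cb, wakeup_close_cb, NULL);
 */
```
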
diff --git a/test/test-bind-error.c b/test/test-bind-error.c
index da0f1d09..a29914a9 100644
--- a/test/test-bind-error.c
+++ b/test/test-bind-error.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
@@ -28,7 +28,7 @@
static int close_cb_called = 0;
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -36,41 +36,41 @@ static void close_cb(oio_handle_t* handle, int status) {
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
TEST_IMPL(bind_error_addrinuse) {
- struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", TEST_PORT);
- oio_handle_t server1, server2;
+ struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
+ uv_handle_t server1, server2;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(&server1, close_cb, NULL);
+ r = uv_tcp_init(&server1, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server1, (struct sockaddr*) &addr);
+ r = uv_bind(&server1, (struct sockaddr*) &addr);
ASSERT(r == 0);
- r = oio_tcp_init(&server2, close_cb, NULL);
+ r = uv_tcp_init(&server2, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server2, (struct sockaddr*) &addr);
+ r = uv_bind(&server2, (struct sockaddr*) &addr);
ASSERT(r == 0);
- r = oio_listen(&server1, 128, NULL);
+ r = uv_listen(&server1, 128, NULL);
ASSERT(r == 0);
- r = oio_listen(&server2, 128, NULL);
+ r = uv_listen(&server2, 128, NULL);
ASSERT(r == -1);
- ASSERT(oio_last_error().code == OIO_EADDRINUSE);
+ ASSERT(uv_last_error().code == UV_EADDRINUSE);
- oio_close(&server1);
- oio_close(&server2);
+ uv_close(&server1);
+ uv_close(&server2);
- oio_run();
+ uv_run();
ASSERT(close_cb_called == 2);
@@ -79,24 +79,24 @@ TEST_IMPL(bind_error_addrinuse) {
TEST_IMPL(bind_error_addrnotavail_1) {
- struct sockaddr_in addr = oio_ip4_addr("127.255.255.255", TEST_PORT);
- oio_handle_t server;
+ struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
+ uv_handle_t server;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(&server, close_cb, NULL);
+ r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, (struct sockaddr*) &addr);
/* It seems that Linux is broken here - bind succeeds. */
if (r == -1) {
- ASSERT(oio_last_error().code == OIO_EADDRNOTAVAIL);
+ ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
}
- oio_close(&server);
+ uv_close(&server);
- oio_run();
+ uv_run();
ASSERT(close_cb_called == 1);
@@ -105,21 +105,21 @@ TEST_IMPL(bind_error_addrnotavail_1) {
TEST_IMPL(bind_error_addrnotavail_2) {
- struct sockaddr_in addr = oio_ip4_addr("4.4.4.4", TEST_PORT);
- oio_handle_t server;
+ struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
+ uv_handle_t server;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(&server, close_cb, NULL);
+ r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &addr);
+ r = uv_bind(&server, (struct sockaddr*) &addr);
ASSERT(r == -1);
- ASSERT(oio_last_error().code == OIO_EADDRNOTAVAIL);
+ ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
- oio_close(&server);
+ uv_close(&server);
- oio_run();
+ uv_run();
ASSERT(close_cb_called == 1);
@@ -129,49 +129,49 @@ TEST_IMPL(bind_error_addrnotavail_2) {
TEST_IMPL(bind_error_fault) {
char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
- oio_handle_t server;
+ uv_handle_t server;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(&server, close_cb, NULL);
+ r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &garbage);
+ r = uv_bind(&server, (struct sockaddr*) &garbage);
ASSERT(r == -1);
- ASSERT(oio_last_error().code == OIO_EFAULT);
+ ASSERT(uv_last_error().code == UV_EFAULT);
- oio_close(&server);
+ uv_close(&server);
- oio_run();
+ uv_run();
ASSERT(close_cb_called == 1);
return 0;
}
-/* Notes: On Linux oio_bind(server, NULL) will segfault the program. */
+/* Notes: On Linux uv_bind(server, NULL) will segfault the program. */
TEST_IMPL(bind_error_inval) {
- struct sockaddr_in addr1 = oio_ip4_addr("0.0.0.0", TEST_PORT);
- struct sockaddr_in addr2 = oio_ip4_addr("0.0.0.0", TEST_PORT_2);
- oio_handle_t server;
+ struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
+ struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
+ uv_handle_t server;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(&server, close_cb, NULL);
+ r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &addr1);
+ r = uv_bind(&server, (struct sockaddr*) &addr1);
ASSERT(r == 0);
- r = oio_bind(&server, (struct sockaddr*) &addr2);
+ r = uv_bind(&server, (struct sockaddr*) &addr2);
ASSERT(r == -1);
- ASSERT(oio_last_error().code == OIO_EINVAL);
+ ASSERT(uv_last_error().code == UV_EINVAL);
- oio_close(&server);
+ uv_close(&server);
- oio_run();
+ uv_run();
ASSERT(close_cb_called == 1);
diff --git a/test/test-callback-stack.c b/test/test-callback-stack.c
index 2c5df068..986f863d 100644
--- a/test/test-callback-stack.c
+++ b/test/test-callback-stack.c
@@ -24,14 +24,14 @@
* stack.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";
-static oio_handle_t client;
-static oio_req_t connect_req, write_req, timeout_req, shutdown_req;
+static uv_handle_t client;
+static uv_req_t connect_req, write_req, timeout_req, shutdown_req;
static int nested = 0;
static int close_cb_called = 0;
@@ -42,7 +42,7 @@ static int bytes_received = 0;
static int shutdown_cb_called = 0;
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "close_cb must be called from a fresh stack");
@@ -50,7 +50,7 @@ static void close_cb(oio_handle_t* handle, int status) {
}
-static void shutdown_cb(oio_req_t* req, int status) {
+static void shutdown_cb(uv_req_t* req, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "shutdown_cb must be called from a fresh stack");
@@ -58,22 +58,22 @@ static void shutdown_cb(oio_req_t* req, int status) {
}
-static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
printf("Read. nread == %d\n", nread);
free(buf.base);
if (nread == 0) {
- ASSERT(oio_last_error().code == OIO_EAGAIN);
+ ASSERT(uv_last_error().code == UV_EAGAIN);
return;
} else if (nread == -1) {
- ASSERT(oio_last_error().code == OIO_EOF);
+ ASSERT(uv_last_error().code == UV_EOF);
nested++;
- if (oio_close(handle)) {
- FATAL("oio_close failed");
+ if (uv_close(handle)) {
+ FATAL("uv_close failed");
}
nested--;
@@ -88,27 +88,27 @@ static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
/* from a fresh stack. */
if (bytes_received == sizeof MESSAGE) {
nested++;
- oio_req_init(&shutdown_req, handle, shutdown_cb);
+ uv_req_init(&shutdown_req, handle, shutdown_cb);
puts("Shutdown");
- if (oio_shutdown(&shutdown_req)) {
- FATAL("oio_shutdown failed");
+ if (uv_shutdown(&shutdown_req)) {
+ FATAL("uv_shutdown failed");
}
nested--;
}
}
-static void timeout_cb(oio_req_t* req, int64_t skew, int status) {
+static void timeout_cb(uv_req_t* req, int64_t skew, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "timeout_cb must be called from a fresh stack");
puts("Timeout complete. Now read data...");
nested++;
- if (oio_read_start(&client, read_cb)) {
- FATAL("oio_read_start failed");
+ if (uv_read_start(&client, read_cb)) {
+ FATAL("uv_read_start failed");
}
nested--;
@@ -116,7 +116,7 @@ static void timeout_cb(oio_req_t* req, int64_t skew, int status) {
}
-static void write_cb(oio_req_t* req, int status) {
+static void write_cb(uv_req_t* req, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "write_cb must be called from a fresh stack");
@@ -127,9 +127,9 @@ static void write_cb(oio_req_t* req, int status) {
/* back to our receive buffer when we start reading. This maximizes the */
/* temptation for the backend to use dirty stack for calling read_cb. */
nested++;
- oio_req_init(&timeout_req, NULL, timeout_cb);
- if (oio_timeout(&timeout_req, 500)) {
- FATAL("oio_timeout failed");
+ uv_req_init(&timeout_req, NULL, timeout_cb);
+ if (uv_timeout(&timeout_req, 500)) {
+ FATAL("uv_timeout failed");
}
nested--;
@@ -137,8 +137,8 @@ static void write_cb(oio_req_t* req, int status) {
}
-static void connect_cb(oio_req_t* req, int status) {
- oio_buf buf;
+static void connect_cb(uv_req_t* req, int status) {
+ uv_buf buf;
puts("Connected. Write some data to echo server...");
@@ -150,10 +150,10 @@ static void connect_cb(oio_req_t* req, int status) {
buf.base = (char*) &MESSAGE;
buf.len = sizeof MESSAGE;
- oio_req_init(&write_req, req->handle, write_cb);
+ uv_req_init(&write_req, req->handle, write_cb);
- if (oio_write(&write_req, &buf, 1)) {
- FATAL("oio_write failed");
+ if (uv_write(&write_req, &buf, 1)) {
+ FATAL("uv_write failed");
}
nested--;
@@ -162,8 +162,8 @@ static void connect_cb(oio_req_t* req, int status) {
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf;
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf;
buf.len = size;
buf.base = (char*) malloc(size);
ASSERT(buf.base);
@@ -172,24 +172,24 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
TEST_IMPL(callback_stack) {
- struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
+ struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- if (oio_tcp_init(&client, &close_cb, NULL)) {
- FATAL("oio_tcp_init failed");
+ if (uv_tcp_init(&client, &close_cb, NULL)) {
+ FATAL("uv_tcp_init failed");
}
puts("Connecting...");
nested++;
- oio_req_init(&connect_req, &client, connect_cb);
- if (oio_connect(&connect_req, (struct sockaddr*) &addr)) {
- FATAL("oio_connect failed");
+ uv_req_init(&connect_req, &client, connect_cb);
+ if (uv_connect(&connect_req, (struct sockaddr*) &addr)) {
+ FATAL("uv_connect failed");
}
nested--;
- oio_run();
+ uv_run();
ASSERT(nested == 0);
ASSERT(connect_cb_called == 1 && "connect_cb must be called exactly once");
diff --git a/test/test-connection-fail.c b/test/test-connection-fail.c
index 50c1de06..ef060d1d 100644
--- a/test/test-connection-fail.c
+++ b/test/test-connection-fail.c
@@ -19,35 +19,35 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
-static oio_handle_t handle;
-static oio_req_t req;
+static uv_handle_t handle;
+static uv_req_t req;
static int connect_cb_calls;
static int close_cb_calls;
-static void on_close(oio_handle_t* handle, int status) {
+static void on_close(uv_handle_t* handle, int status) {
ASSERT(status == 0);
close_cb_calls++;
}
-static void on_connect(oio_req_t *req, int status) {
+static void on_connect(uv_req_t *req, int status) {
ASSERT(status == -1);
- ASSERT(oio_last_error().code == OIO_ECONNREFUSED);
+ ASSERT(uv_last_error().code == UV_ECONNREFUSED);
connect_cb_calls++;
- oio_close(req->handle);
+ uv_close(req->handle);
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
@@ -57,26 +57,26 @@ TEST_IMPL(connection_fail) {
struct sockaddr_in client_addr, server_addr;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- client_addr = oio_ip4_addr("0.0.0.0", 0);
+ client_addr = uv_ip4_addr("0.0.0.0", 0);
/* There should be no servers listening on this port. */
- server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
+ server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
- r = oio_tcp_init(&handle, on_close, NULL);
+ r = uv_tcp_init(&handle, on_close, NULL);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
- oio_req_init(&req, &handle, on_connect);
+ uv_req_init(&req, &handle, on_connect);
- oio_bind(&handle, (struct sockaddr*)&client_addr);
- r = oio_connect(&req, (struct sockaddr*)&server_addr);
+ uv_bind(&handle, (struct sockaddr*)&client_addr);
+ r = uv_connect(&req, (struct sockaddr*)&server_addr);
ASSERT(!r);
- oio_run();
+ uv_run();
ASSERT(connect_cb_calls == 1);
ASSERT(close_cb_calls == 1);
diff --git a/test/test-delayed-accept.c b/test/test-delayed-accept.c
index adaaeceb..3c8a55f4 100644
--- a/test/test-delayed-accept.c
+++ b/test/test-delayed-accept.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
@@ -33,7 +33,7 @@ static int close_cb_called = 0;
static int connect_cb_called = 0;
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -43,80 +43,80 @@ static void close_cb(oio_handle_t* handle, int status) {
}
-static void do_accept(oio_req_t* req, int64_t skew, int status) {
- oio_handle_t* server;
- oio_handle_t* accepted_handle = (oio_handle_t*)malloc(sizeof *accepted_handle);
+static void do_accept(uv_req_t* req, int64_t skew, int status) {
+ uv_handle_t* server;
+ uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle);
int r;
ASSERT(req != NULL);
ASSERT(status == 0);
ASSERT(accepted_handle != NULL);
- server = (oio_handle_t*)req->data;
- r = oio_accept(server, accepted_handle, close_cb, NULL);
+ server = (uv_handle_t*)req->data;
+ r = uv_accept(server, accepted_handle, close_cb, NULL);
ASSERT(r == 0);
do_accept_called++;
/* Immediately close the accepted handle. */
- oio_close(accepted_handle);
+ uv_close(accepted_handle);
/* After accepting the two clients close the server handle */
if (do_accept_called == 2) {
- oio_close(server);
+ uv_close(server);
}
free(req);
}
-static void accept_cb(oio_handle_t* handle) {
- oio_req_t* timeout_req = (oio_req_t*)malloc(sizeof *timeout_req);
+static void accept_cb(uv_handle_t* handle) {
+ uv_req_t* timeout_req = (uv_req_t*)malloc(sizeof *timeout_req);
ASSERT(timeout_req != NULL);
/* Accept the client after 1 second */
- oio_req_init(timeout_req, NULL, &do_accept);
+ uv_req_init(timeout_req, NULL, &do_accept);
timeout_req->data = (void*)handle;
- oio_timeout(timeout_req, 1000);
+ uv_timeout(timeout_req, 1000);
accept_cb_called++;
}
static void start_server() {
- struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", TEST_PORT);
- oio_handle_t* server = (oio_handle_t*)malloc(sizeof *server);
+ struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
+ uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);
int r;
ASSERT(server != NULL);
- r = oio_tcp_init(server, close_cb, NULL);
+ r = uv_tcp_init(server, close_cb, NULL);
ASSERT(r == 0);
- r = oio_bind(server, (struct sockaddr*) &addr);
+ r = uv_bind(server, (struct sockaddr*) &addr);
ASSERT(r == 0);
- r = oio_listen(server, 128, accept_cb);
+ r = uv_listen(server, 128, accept_cb);
ASSERT(r == 0);
}
-static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
/* The server will not send anything, it should close gracefully. */
ASSERT(handle != NULL);
ASSERT(nread == -1);
- ASSERT(oio_last_error().code == OIO_EOF);
+ ASSERT(uv_last_error().code == UV_EOF);
if (buf.base) {
free(buf.base);
}
- oio_close(handle);
+ uv_close(handle);
}
-static void connect_cb(oio_req_t* req, int status) {
+static void connect_cb(uv_req_t* req, int status) {
int r;
ASSERT(req != NULL);
@@ -124,7 +124,7 @@ static void connect_cb(oio_req_t* req, int status) {
/* Not that the server will send anything, but otherwise we'll never know */
/* when the server closes the connection. */
- r = oio_read_start(req->handle, read_cb);
+ r = uv_read_start(req->handle, read_cb);
ASSERT(r == 0);
connect_cb_called++;
@@ -134,25 +134,25 @@ static void connect_cb(oio_req_t* req, int status) {
static void client_connect() {
- struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
- oio_handle_t* client = (oio_handle_t*)malloc(sizeof *client);
- oio_req_t* connect_req = (oio_req_t*)malloc(sizeof *connect_req);
+ struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+ uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+ uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
ASSERT(client != NULL);
ASSERT(connect_req != NULL);
- r = oio_tcp_init(client, close_cb, NULL);
+ r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
- oio_req_init(connect_req, client, connect_cb);
- r = oio_connect(connect_req, (struct sockaddr*)&addr);
+ uv_req_init(connect_req, client, connect_cb);
+ r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf;
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@@ -161,14 +161,14 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
TEST_IMPL(delayed_accept) {
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
start_server();
client_connect();
client_connect();
- oio_run();
+ uv_run();
ASSERT(accept_cb_called == 2);
ASSERT(do_accept_called == 2);
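The hunk above ends the delayed-accept test. For orientation, here is a minimal sketch (not part of the patch) of the renamed server-side calls it exercises: uv_init, uv_tcp_init, uv_bind, uv_listen and uv_run. The port number, callback names and error handling are illustrative only; the sketch assumes nothing beyond the uv.h declarations visible in this patch.

#include "../uv.h"   /* test-style include; adjust the path as needed */
#include <stdlib.h>

static uv_buf sketch_alloc_cb(uv_handle_t* handle, size_t size) {
  uv_buf buf;
  buf.base = (char*)malloc(size);
  buf.len = size;
  return buf;
}

static void sketch_close_cb(uv_handle_t* handle, int status) {
  /* The handle below is malloc'd, so release it once it is fully closed. */
  free(handle);
}

static void sketch_accept_cb(uv_handle_t* server) {
  /* A real server would uv_accept() here; the test above intentionally
   * defers it with uv_timeout() to exercise the delayed-accept path. */
}

static int sketch_server(void) {
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 8000);
  uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);

  uv_init(sketch_alloc_cb);
  if (uv_tcp_init(server, sketch_close_cb, NULL)) return -1;
  if (uv_bind(server, (struct sockaddr*)&addr)) return -1;
  if (uv_listen(server, 128, sketch_accept_cb)) return -1;

  return uv_run();  /* blocks until the server handle is closed */
}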
diff --git a/test/test-loop-handles.c b/test/test-loop-handles.c
index 9f8c72d8..65c84b1b 100644
--- a/test/test-loop-handles.c
+++ b/test/test-loop-handles.c
@@ -64,7 +64,7 @@
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
@@ -75,15 +75,15 @@
#define TIMEOUT 100
-static oio_handle_t prepare_1_handle;
-static oio_handle_t prepare_2_handle;
+static uv_handle_t prepare_1_handle;
+static uv_handle_t prepare_2_handle;
-static oio_handle_t check_handle;
+static uv_handle_t check_handle;
-static oio_handle_t idle_1_handles[IDLE_COUNT];
-static oio_handle_t idle_2_handle;
+static uv_handle_t idle_1_handles[IDLE_COUNT];
+static uv_handle_t idle_2_handle;
-static oio_req_t timeout_req;
+static uv_req_t timeout_req;
static int loop_iteration = 0;
@@ -109,7 +109,7 @@ static int idle_2_is_active = 0;
static int timeout_cb_called = 0;
-static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
+static void timeout_cb(uv_req_t *req, int64_t skew, int status) {
int r;
ASSERT(req == &timeout_req);
@@ -117,12 +117,12 @@ static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
timeout_cb_called++;
- r = oio_timeout(req, TIMEOUT);
+ r = uv_timeout(req, TIMEOUT);
ASSERT(r == 0);
}
-static void idle_2_cb(oio_handle_t* handle, int status) {
+static void idle_2_cb(uv_handle_t* handle, int status) {
int r;
LOG("IDLE_2_CB\n");
@@ -132,12 +132,12 @@ static void idle_2_cb(oio_handle_t* handle, int status) {
idle_2_cb_called++;
- r = oio_close(handle);
+ r = uv_close(handle);
ASSERT(r == 0);
}
-static void idle_2_close_cb(oio_handle_t* handle, int status){
+static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n");
ASSERT(handle == &idle_2_handle);
@@ -150,7 +150,7 @@ static void idle_2_close_cb(oio_handle_t* handle, int status){
}
-static void idle_1_cb(oio_handle_t* handle, int status) {
+static void idle_1_cb(uv_handle_t* handle, int status) {
int r;
LOG("IDLE_1_CB\n");
@@ -162,9 +162,9 @@ static void idle_1_cb(oio_handle_t* handle, int status) {
/* Init idle_2 and make it active */
if (!idle_2_is_active) {
- r = oio_idle_init(&idle_2_handle, idle_2_close_cb, NULL);
+ r = uv_idle_init(&idle_2_handle, idle_2_close_cb, NULL);
ASSERT(r == 0);
- r = oio_idle_start(&idle_2_handle, idle_2_cb);
+ r = uv_idle_start(&idle_2_handle, idle_2_cb);
ASSERT(r == 0);
idle_2_is_active = 1;
idle_2_cb_started++;
@@ -173,14 +173,14 @@ static void idle_1_cb(oio_handle_t* handle, int status) {
idle_1_cb_called++;
if (idle_1_cb_called % 5 == 0) {
- r = oio_idle_stop(handle);
+ r = uv_idle_stop(handle);
ASSERT(r == 0);
idles_1_active--;
}
}
-static void idle_1_close_cb(oio_handle_t* handle, int status){
+static void idle_1_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_1_CLOSE_CB\n");
ASSERT(handle != NULL);
@@ -190,7 +190,7 @@ static void idle_1_close_cb(oio_handle_t* handle, int status){
}
-static void check_cb(oio_handle_t* handle, int status) {
+static void check_cb(uv_handle_t* handle, int status) {
int i, r;
LOG("CHECK_CB\n");
@@ -206,29 +206,29 @@ static void check_cb(oio_handle_t* handle, int status) {
if (loop_iteration < ITERATIONS) {
/* Make some idle watchers active */
for (i = 0; i < 1 + (loop_iteration % IDLE_COUNT); i++) {
- r = oio_idle_start(&idle_1_handles[i], idle_1_cb);
+ r = uv_idle_start(&idle_1_handles[i], idle_1_cb);
ASSERT(r == 0);
idles_1_active++;
}
} else {
/* End of the test - close all handles */
- r = oio_close(&prepare_1_handle);
+ r = uv_close(&prepare_1_handle);
ASSERT(r == 0);
- r = oio_close(&check_handle);
+ r = uv_close(&check_handle);
ASSERT(r == 0);
- r = oio_close(&prepare_2_handle);
+ r = uv_close(&prepare_2_handle);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
- r = oio_close(&idle_1_handles[i]);
+ r = uv_close(&idle_1_handles[i]);
ASSERT(r == 0);
}
/* This handle is closed/recreated every time, close it only if it is */
/* active.*/
if (idle_2_is_active) {
- r = oio_close(&idle_2_handle);
+ r = uv_close(&idle_2_handle);
ASSERT(r == 0);
}
}
@@ -237,7 +237,7 @@ static void check_cb(oio_handle_t* handle, int status) {
}
-static void check_close_cb(oio_handle_t* handle, int status){
+static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == &check_handle);
ASSERT(status == 0);
@@ -246,7 +246,7 @@ static void check_close_cb(oio_handle_t* handle, int status){
}
-static void prepare_2_cb(oio_handle_t* handle, int status) {
+static void prepare_2_cb(uv_handle_t* handle, int status) {
int r;
LOG("PREPARE_2_CB\n");
@@ -263,14 +263,14 @@ static void prepare_2_cb(oio_handle_t* handle, int status) {
/* (loop_iteration % 2 == 0) cannot be true. */
ASSERT(loop_iteration % 2 != 0);
- r = oio_prepare_stop(handle);
+ r = uv_prepare_stop(handle);
ASSERT(r == 0);
prepare_2_cb_called++;
}
-static void prepare_2_close_cb(oio_handle_t* handle, int status){
+static void prepare_2_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == &prepare_2_handle);
ASSERT(status == 0);
@@ -279,7 +279,7 @@ static void prepare_2_close_cb(oio_handle_t* handle, int status){
}
-static void prepare_1_cb(oio_handle_t* handle, int status) {
+static void prepare_1_cb(uv_handle_t* handle, int status) {
int r;
LOG("PREPARE_1_CB\n");
@@ -293,7 +293,7 @@ static void prepare_1_cb(oio_handle_t* handle, int status) {
*/
if (loop_iteration % 2 == 0) {
- r = oio_prepare_start(&prepare_2_handle, prepare_2_cb);
+ r = uv_prepare_start(&prepare_2_handle, prepare_2_cb);
ASSERT(r == 0);
}
@@ -304,7 +304,7 @@ static void prepare_1_cb(oio_handle_t* handle, int status) {
}
-static void prepare_1_close_cb(oio_handle_t* handle, int status){
+static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == &prepare_1_handle);
ASSERT(status == 0);
@@ -313,8 +313,8 @@ static void prepare_1_close_cb(oio_handle_t* handle, int status){
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf rv = { 0, 0 };
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf rv = { 0, 0 };
FATAL("alloc_cb should never be called in this test");
return rv;
}
@@ -324,25 +324,25 @@ TEST_IMPL(loop_handles) {
int i;
int r;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
+ r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
ASSERT(r == 0);
- r = oio_prepare_start(&prepare_1_handle, prepare_1_cb);
+ r = uv_prepare_start(&prepare_1_handle, prepare_1_cb);
ASSERT(r == 0);
- r = oio_check_init(&check_handle, check_close_cb, NULL);
+ r = uv_check_init(&check_handle, check_close_cb, NULL);
ASSERT(r == 0);
- r = oio_check_start(&check_handle, check_cb);
+ r = uv_check_start(&check_handle, check_cb);
ASSERT(r == 0);
/* initialize only, prepare_2 is started by prepare_1_cb */
- r = oio_prepare_init(&prepare_2_handle, prepare_2_close_cb, NULL);
+ r = uv_prepare_init(&prepare_2_handle, prepare_2_close_cb, NULL);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
/* initialize only, idle_1 handles are started by check_cb */
- r = oio_idle_init(&idle_1_handles[i], idle_1_close_cb, NULL);
+ r = uv_idle_init(&idle_1_handles[i], idle_1_close_cb, NULL);
ASSERT(r == 0);
}
@@ -350,12 +350,12 @@ TEST_IMPL(loop_handles) {
/* the timer callback is there to keep the event loop polling */
/* unref it as it is not supposed to keep the loop alive */
- oio_req_init(&timeout_req, NULL, timeout_cb);
- r = oio_timeout(&timeout_req, TIMEOUT);
+ uv_req_init(&timeout_req, NULL, timeout_cb);
+ r = uv_timeout(&timeout_req, TIMEOUT);
ASSERT(r == 0);
- oio_unref();
+ uv_unref();
- r = oio_run();
+ r = uv_run();
ASSERT(r == 0);
ASSERT(loop_iteration == ITERATIONS);
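test-loop-handles.c above covers the prepare/check/idle watchers under their new names. A reduced sketch of the idle-watcher lifecycle follows, again assuming only the uv.h declarations visible in this patch; the handle, counter and callback names are illustrative.

#include "../uv.h"

static uv_handle_t idle_handle;
static int ticks = 0;

static uv_buf sketch_alloc_cb(uv_handle_t* handle, size_t size) {
  uv_buf buf = { 0, 0 };  /* idle watchers never read, so this is unused */
  return buf;
}

static void idle_close_cb(uv_handle_t* handle, int status) {
  /* Runs on the next loop iteration after uv_close(); see uv__next() below. */
}

static void idle_cb(uv_handle_t* handle, int status) {
  if (++ticks == 10) {
    uv_close(handle);  /* stops the watcher; close_cb runs next iteration */
  }
}

static int sketch_idle(void) {
  uv_init(sketch_alloc_cb);
  uv_idle_init(&idle_handle, idle_close_cb, NULL);
  uv_idle_start(&idle_handle, idle_cb);
  return uv_run();  /* returns once no active handles remain */
}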
diff --git a/test/test-ping-pong.c b/test/test-ping-pong.c
index 01da3bc4..56ea1db1 100644
--- a/test/test-ping-pong.c
+++ b/test/test-ping-pong.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
@@ -39,16 +39,16 @@ static char PING[] = "PING\n";
typedef struct {
int pongs;
int state;
- oio_handle_t handle;
- oio_req_t connect_req;
- oio_req_t read_req;
+ uv_handle_t handle;
+ uv_req_t connect_req;
+ uv_req_t read_req;
char read_buffer[BUFSIZE];
} pinger_t;
void pinger_try_read(pinger_t* pinger);
-static void pinger_on_close(oio_handle_t* handle, int status) {
+static void pinger_on_close(uv_handle_t* handle, int status) {
pinger_t* pinger = (pinger_t*)handle->data;
ASSERT(status == 0);
@@ -60,7 +60,7 @@ static void pinger_on_close(oio_handle_t* handle, int status) {
}
-static void pinger_after_write(oio_req_t *req, int status) {
+static void pinger_after_write(uv_req_t *req, int status) {
ASSERT(status == 0);
free(req);
@@ -68,31 +68,31 @@ static void pinger_after_write(oio_req_t *req, int status) {
static void pinger_write_ping(pinger_t* pinger) {
- oio_req_t *req;
- oio_buf buf;
+ uv_req_t *req;
+ uv_buf buf;
buf.base = (char*)&PING;
buf.len = strlen(PING);
- req = (oio_req_t*)malloc(sizeof(*req));
- oio_req_init(req, &pinger->handle, pinger_after_write);
+ req = (uv_req_t*)malloc(sizeof(*req));
+ uv_req_init(req, &pinger->handle, pinger_after_write);
- if (oio_write(req, &buf, 1)) {
- FATAL("oio_write failed");
+ if (uv_write(req, &buf, 1)) {
+ FATAL("uv_write failed");
}
puts("PING");
}
-static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
unsigned int i;
pinger_t* pinger;
pinger = (pinger_t*)handle->data;
if (nread < 0) {
- ASSERT(oio_last_error().code == OIO_EOF);
+ ASSERT(uv_last_error().code == UV_EOF);
puts("got EOF");
@@ -100,7 +100,7 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
free(buf.base);
}
- oio_close(&pinger->handle);
+ uv_close(&pinger->handle);
return;
}
@@ -115,7 +115,7 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
- oio_close(&pinger->handle);
+ uv_close(&pinger->handle);
return;
}
}
@@ -123,20 +123,20 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
}
-static void pinger_on_connect(oio_req_t *req, int status) {
+static void pinger_on_connect(uv_req_t *req, int status) {
pinger_t *pinger = (pinger_t*)req->handle->data;
ASSERT(status == 0);
pinger_write_ping(pinger);
- oio_read_start(req->handle, pinger_read_cb);
+ uv_read_start(req->handle, pinger_read_cb);
}
static void pinger_new() {
int r;
- struct sockaddr_in server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
+ struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
pinger_t *pinger;
pinger = (pinger_t*)malloc(sizeof(*pinger));
@@ -144,20 +144,20 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
- r = oio_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
+ r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway, */
/* so these handles can be pre-initialized. */
- oio_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
+ uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
- r = oio_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
+ r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
ASSERT(!r);
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf;
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@@ -165,10 +165,10 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
TEST_IMPL(ping_pong) {
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
pinger_new();
- oio_run();
+ uv_run();
ASSERT(completed_pingers == 1);
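test-ping-pong.c above is also the simplest worked example of the renamed client path: uv_req_init ties a request to a handle, uv_connect starts the handshake, and uv_write takes an array of uv_buf. A trimmed sketch of that flow, not part of the patch, with the address, port and payload chosen purely for illustration:

#include "../uv.h"
#include <stdlib.h>
#include <string.h>

static uv_handle_t client;
static uv_req_t connect_req;
static char payload[] = "hello\n";  /* must outlive the write, see below */

static uv_buf sketch_alloc_cb(uv_handle_t* handle, size_t size) {
  uv_buf buf;
  buf.base = (char*)malloc(size);
  buf.len = size;
  return buf;
}

static void sketch_close_cb(uv_handle_t* handle, int status) { }

static void write_cb(uv_req_t* req, int status) {
  free(req);
}

static void on_connect(uv_req_t* req, int status) {
  uv_req_t* write_req = (uv_req_t*)malloc(sizeof *write_req);
  uv_buf buf;

  buf.base = payload;
  buf.len = strlen(payload);

  /* The buffer contents must stay valid until write_cb runs; the uv_buf
   * array itself may live on the stack (see the note in uv-unix.c). */
  uv_req_init(write_req, req->handle, write_cb);
  uv_write(write_req, &buf, 1);
}

static void sketch_client(void) {
  struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 8000);

  uv_init(sketch_alloc_cb);
  uv_tcp_init(&client, sketch_close_cb, NULL);
  uv_req_init(&connect_req, &client, on_connect);
  uv_connect(&connect_req, (struct sockaddr*)&addr);
  uv_run();
}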
diff --git a/test/test-tcp-writealot.c b/test/test-tcp-writealot.c
index 39822e83..99659997 100644
--- a/test/test-tcp-writealot.c
+++ b/test/test-tcp-writealot.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
#include
#include
@@ -45,7 +45,7 @@ static int bytes_received = 0;
static int bytes_received_done = 0;
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);
@@ -55,7 +55,7 @@ static void close_cb(oio_handle_t* handle, int status) {
}
-static void shutdown_cb(oio_req_t* req, int status) {
+static void shutdown_cb(uv_req_t* req, int status) {
ASSERT(req);
ASSERT(status == 0);
@@ -72,18 +72,18 @@ static void shutdown_cb(oio_req_t* req, int status) {
}
-static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
ASSERT(handle != NULL);
if (nread < 0) {
- ASSERT(oio_last_error().code == OIO_EOF);
+ ASSERT(uv_last_error().code == UV_EOF);
printf("GOT EOF\n");
if (buf.base) {
free(buf.base);
}
- oio_close(handle);
+ uv_close(handle);
return;
}
@@ -93,12 +93,12 @@ static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
}
-static void write_cb(oio_req_t* req, int status) {
+static void write_cb(uv_req_t* req, int status) {
ASSERT(req != NULL);
if (status) {
- oio_err_t err = oio_last_error();
- fprintf(stderr, "oio_write error: %s\n", oio_strerror(err));
+ uv_err_t err = uv_last_error();
+ fprintf(stderr, "uv_write error: %s\n", uv_strerror(err));
ASSERT(0);
}
@@ -109,9 +109,9 @@ static void write_cb(oio_req_t* req, int status) {
}
-static void connect_cb(oio_req_t* req, int status) {
- oio_buf send_bufs[CHUNKS_PER_WRITE];
- oio_handle_t* handle;
+static void connect_cb(uv_req_t* req, int status) {
+ uv_buf send_bufs[CHUNKS_PER_WRITE];
+ uv_handle_t* handle;
int i, j, r;
ASSERT(req != NULL);
@@ -130,33 +130,33 @@ static void connect_cb(oio_req_t* req, int status) {
bytes_sent += CHUNK_SIZE;
}
- req = (oio_req_t*)malloc(sizeof *req);
+ req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
- oio_req_init(req, handle, write_cb);
- r = oio_write(req, (oio_buf*)&send_bufs, CHUNKS_PER_WRITE);
+ uv_req_init(req, handle, write_cb);
+ r = uv_write(req, (uv_buf*)&send_bufs, CHUNKS_PER_WRITE);
ASSERT(r == 0);
}
/* Shutdown on drain. FIXME: dealloc req? */
- req = (oio_req_t*) malloc(sizeof(oio_req_t));
+ req = (uv_req_t*) malloc(sizeof(uv_req_t));
ASSERT(req != NULL);
- oio_req_init(req, handle, shutdown_cb);
- r = oio_shutdown(req);
+ uv_req_init(req, handle, shutdown_cb);
+ r = uv_shutdown(req);
ASSERT(r == 0);
/* Start reading */
- req = (oio_req_t*)malloc(sizeof *req);
+ req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);
- oio_req_init(req, handle, read_cb);
- r = oio_read_start(handle, read_cb);
+ uv_req_init(req, handle, read_cb);
+ r = uv_read_start(handle, read_cb);
ASSERT(r == 0);
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf;
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@@ -164,9 +164,9 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
TEST_IMPL(tcp_writealot) {
- struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
- oio_handle_t* client = (oio_handle_t*)malloc(sizeof *client);
- oio_req_t* connect_req = (oio_req_t*)malloc(sizeof *connect_req);
+ struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+ uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
+ uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;
ASSERT(client != NULL);
@@ -176,16 +176,16 @@ TEST_IMPL(tcp_writealot) {
ASSERT(send_buffer != NULL);
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- r = oio_tcp_init(client, close_cb, NULL);
+ r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);
- oio_req_init(connect_req, client, connect_cb);
- r = oio_connect(connect_req, (struct sockaddr*)&addr);
+ uv_req_init(connect_req, client, connect_cb);
+ r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);
- oio_run();
+ uv_run();
ASSERT(shutdown_cb_called == 1);
ASSERT(connect_cb_called == 1);
diff --git a/test/test-timeout.c b/test/test-timeout.c
index dbfaff4d..ea984eb6 100644
--- a/test/test-timeout.c
+++ b/test/test-timeout.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "../oio.h"
+#include "../uv.h"
#include "task.h"
@@ -28,7 +28,7 @@ static int timeouts = 0;
static int64_t start_time;
-static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
+static void timeout_cb(uv_req_t *req, int64_t skew, int status) {
ASSERT(req != NULL);
ASSERT(status == 0);
@@ -36,11 +36,11 @@ static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
timeouts++;
/* Just call this randomly for the code coverage. */
- oio_update_time();
+ uv_update_time();
}
-static void exit_timeout_cb(oio_req_t *req, int64_t skew, int status) {
- int64_t now = oio_now();
+static void exit_timeout_cb(uv_req_t *req, int64_t skew, int status) {
+ int64_t now = uv_now();
ASSERT(req != NULL);
ASSERT(status == 0);
ASSERT(timeouts == expected);
@@ -48,57 +48,57 @@ static void exit_timeout_cb(oio_req_t *req, int64_t skew, int status) {
exit(0);
}
-static void dummy_timeout_cb(oio_req_t *req, int64_t skew, int status) {
+static void dummy_timeout_cb(uv_req_t *req, int64_t skew, int status) {
/* Should never be called */
FATAL("dummy_timeout_cb should never be called");
}
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
- oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+ uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
TEST_IMPL(timeout) {
- oio_req_t *req;
- oio_req_t exit_req;
- oio_req_t dummy_req;
+ uv_req_t *req;
+ uv_req_t exit_req;
+ uv_req_t dummy_req;
int i;
- oio_init(alloc_cb);
+ uv_init(alloc_cb);
- start_time = oio_now();
+ start_time = uv_now();
ASSERT(0 < start_time);
/* Let 10 timers time out in 500 ms total. */
for (i = 0; i < 10; i++) {
- req = (oio_req_t*)malloc(sizeof(*req));
+ req = (uv_req_t*)malloc(sizeof(*req));
ASSERT(req != NULL);
- oio_req_init(req, NULL, timeout_cb);
+ uv_req_init(req, NULL, timeout_cb);
- if (oio_timeout(req, i * 50) < 0) {
- FATAL("oio_timeout failed");
+ if (uv_timeout(req, i * 50) < 0) {
+ FATAL("uv_timeout failed");
}
expected++;
}
/* The 11th timer exits the test and runs after 1 s. */
- oio_req_init(&exit_req, NULL, exit_timeout_cb);
- if (oio_timeout(&exit_req, 1000) < 0) {
- FATAL("oio_timeout failed");
+ uv_req_init(&exit_req, NULL, exit_timeout_cb);
+ if (uv_timeout(&exit_req, 1000) < 0) {
+ FATAL("uv_timeout failed");
}
/* The 12th timer should never run. */
- oio_req_init(&dummy_req, NULL, dummy_timeout_cb);
- if (oio_timeout(&dummy_req, 2000)) {
- FATAL("oio_timeout failed");
+ uv_req_init(&dummy_req, NULL, dummy_timeout_cb);
+ if (uv_timeout(&dummy_req, 2000)) {
+ FATAL("uv_timeout failed");
}
- oio_run();
+ uv_run();
FATAL("should never get here");
return 2;
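test-timeout.c above shows that timeouts are plain uv_req_t requests with no handle attached. A minimal sketch of a one-shot timeout under the new names, assuming only the API shown in this patch; the 250 ms delay and callback name are illustrative.

#include "../uv.h"
#include <stdio.h>

static uv_req_t timer_req;

static uv_buf sketch_alloc_cb(uv_handle_t* handle, size_t size) {
  uv_buf buf = { 0, 0 };  /* no reads happen in this sketch */
  return buf;
}

static void once_cb(uv_req_t* req, int64_t skew, int status) {
  printf("fired at %lld ms\n", (long long) uv_now());
}

static int sketch_timer(void) {
  uv_init(sketch_alloc_cb);
  uv_req_init(&timer_req, NULL, once_cb);  /* timeouts take no handle */
  if (uv_timeout(&timer_req, 250) < 0) {
    return -1;
  }
  return uv_run();  /* the one-shot timer fires once, then the loop exits */
}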
diff --git a/tree.h b/tree.h
index 29a60316..37966d35 100644
--- a/tree.h
+++ b/tree.h
@@ -23,8 +23,8 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _OIO_TREE_H_
-#define _OIO_TREE_H_
+#ifndef _UV_TREE_H_
+#define _UV_TREE_H_
#define __unused
@@ -759,4 +759,4 @@ name##_RB_MINMAX(struct name *head, int val) \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
-#endif /* _OIO_TREE_H_ */
+#endif /* _UV_TREE_H_ */
diff --git a/oio-unix.c b/uv-unix.c
similarity index 64%
rename from oio-unix.c
rename to uv-unix.c
index a9cca0c9..873f6434 100644
--- a/oio-unix.c
+++ b/uv-unix.c
@@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/
-#include "oio.h"
+#include "uv.h"
#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
@@ -34,71 +34,71 @@
#include
-static oio_err_t last_err;
-static oio_alloc_cb alloc_cb;
+static uv_err_t last_err;
+static uv_alloc_cb alloc_cb;
-void oio__tcp_io(EV_P_ ev_io* watcher, int revents);
-void oio__next(EV_P_ ev_idle* watcher, int revents);
-static void oio__tcp_connect(oio_handle_t* handle);
-int oio_tcp_open(oio_handle_t*, int fd);
-static void oio__finish_close(oio_handle_t* handle);
+void uv__tcp_io(EV_P_ ev_io* watcher, int revents);
+void uv__next(EV_P_ ev_idle* watcher, int revents);
+static void uv__tcp_connect(uv_handle_t* handle);
+int uv_tcp_open(uv_handle_t*, int fd);
+static void uv__finish_close(uv_handle_t* handle);
/* flags */
enum {
- OIO_CLOSING = 0x00000001, /* oio_close() called but not finished. */
- OIO_CLOSED = 0x00000002, /* close(2) finished. */
- OIO_READING = 0x00000004, /* oio_read_start() called. */
- OIO_SHUTTING = 0x00000008, /* oio_shutdown() called but not complete. */
- OIO_SHUT = 0x00000010, /* Write side closed. */
+ UV_CLOSING = 0x00000001, /* uv_close() called but not finished. */
+ UV_CLOSED = 0x00000002, /* close(2) finished. */
+ UV_READING = 0x00000004, /* uv_read_start() called. */
+ UV_SHUTTING = 0x00000008, /* uv_shutdown() called but not complete. */
+ UV_SHUT = 0x00000010, /* Write side closed. */
};
-void oio_flag_set(oio_handle_t* handle, int flag) {
+void uv_flag_set(uv_handle_t* handle, int flag) {
handle->flags |= flag;
}
-oio_err_t oio_last_error() {
+uv_err_t uv_last_error() {
return last_err;
}
-char* oio_strerror(oio_err_t err) {
+char* uv_strerror(uv_err_t err) {
return strerror(err.sys_errno_);
}
-void oio_flag_unset(oio_handle_t* handle, int flag) {
+void uv_flag_unset(uv_handle_t* handle, int flag) {
handle->flags = handle->flags & ~flag;
}
-int oio_flag_is_set(oio_handle_t* handle, int flag) {
+int uv_flag_is_set(uv_handle_t* handle, int flag) {
return (handle->flags & flag) != 0;
}
-static oio_err_code oio_translate_sys_error(int sys_errno) {
+static uv_err_code uv_translate_sys_error(int sys_errno) {
switch (sys_errno) {
- case 0: return OIO_OK;
- case EACCES: return OIO_EACCESS;
- case EAGAIN: return OIO_EAGAIN;
- case ECONNRESET: return OIO_ECONNRESET;
- case EFAULT: return OIO_EFAULT;
- case EMFILE: return OIO_EMFILE;
- case EINVAL: return OIO_EINVAL;
- case ECONNREFUSED: return OIO_ECONNREFUSED;
- case EADDRINUSE: return OIO_EADDRINUSE;
- case EADDRNOTAVAIL: return OIO_EADDRNOTAVAIL;
- default: return OIO_UNKNOWN;
+ case 0: return UV_OK;
+ case EACCES: return UV_EACCESS;
+ case EAGAIN: return UV_EAGAIN;
+ case ECONNRESET: return UV_ECONNRESET;
+ case EFAULT: return UV_EFAULT;
+ case EMFILE: return UV_EMFILE;
+ case EINVAL: return UV_EINVAL;
+ case ECONNREFUSED: return UV_ECONNREFUSED;
+ case EADDRINUSE: return UV_EADDRINUSE;
+ case EADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
+ default: return UV_UNKNOWN;
}
}
-static oio_err_t oio_err_new_artificial(oio_handle_t* handle, int code) {
- oio_err_t err;
+static uv_err_t uv_err_new_artificial(uv_handle_t* handle, int code) {
+ uv_err_t err;
err.sys_errno_ = 0;
err.code = code;
last_err = err;
@@ -106,16 +106,16 @@ static oio_err_t oio_err_new_artificial(oio_handle_t* handle, int code) {
}
-static oio_err_t oio_err_new(oio_handle_t* handle, int sys_error) {
- oio_err_t err;
+static uv_err_t uv_err_new(uv_handle_t* handle, int sys_error) {
+ uv_err_t err;
err.sys_errno_ = sys_error;
- err.code = oio_translate_sys_error(sys_error);
+ err.code = uv_translate_sys_error(sys_error);
last_err = err;
return err;
}
-struct sockaddr_in oio_ip4_addr(char* ip, int port) {
+struct sockaddr_in uv_ip4_addr(char* ip, int port) {
struct sockaddr_in addr;
addr.sin_family = AF_INET;
@@ -126,26 +126,26 @@ struct sockaddr_in oio_ip4_addr(char* ip, int port) {
}
-int oio_close(oio_handle_t* handle) {
+int uv_close(uv_handle_t* handle) {
switch (handle->type) {
- case OIO_TCP:
+ case UV_TCP:
ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
break;
- case OIO_PREPARE:
+ case UV_PREPARE:
ev_prepare_stop(EV_DEFAULT_ &handle->prepare_watcher);
break;
- case OIO_CHECK:
+ case UV_CHECK:
ev_check_stop(EV_DEFAULT_ &handle->check_watcher);
break;
- case OIO_IDLE:
+ case UV_IDLE:
ev_idle_stop(EV_DEFAULT_ &handle->idle_watcher);
break;
- case OIO_ASYNC:
+ case UV_ASYNC:
ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
break;
@@ -154,7 +154,7 @@ int oio_close(oio_handle_t* handle) {
return -1;
}
- oio_flag_set(handle, OIO_CLOSING);
+ uv_flag_set(handle, UV_CLOSING);
/* This is used to call the on_close callback in the next loop. */
ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
@@ -165,34 +165,34 @@ int oio_close(oio_handle_t* handle) {
}
-void oio_init(oio_alloc_cb cb) {
+void uv_init(uv_alloc_cb cb) {
assert(cb);
alloc_cb = cb;
ev_default_loop(0);
}
-int oio_run() {
+int uv_run() {
ev_run(EV_DEFAULT_ 0);
return 0;
}
-static void oio__handle_init(oio_handle_t* handle, oio_handle_type type,
- oio_close_cb close_cb, void* data) {
+static void uv__handle_init(uv_handle_t* handle, uv_handle_type type,
+ uv_close_cb close_cb, void* data) {
handle->type = type;
handle->close_cb = close_cb;
handle->data = data;
handle->flags = 0;
- ev_init(&handle->next_watcher, oio__next);
+ ev_init(&handle->next_watcher, uv__next);
handle->next_watcher.data = handle;
}
-int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb,
+int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
void* data) {
- oio__handle_init(handle, OIO_TCP, close_cb, data);
+ uv__handle_init(handle, UV_TCP, close_cb, data);
handle->connect_req = NULL;
handle->accepted_fd = -1;
@@ -201,10 +201,10 @@ int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb,
ngx_queue_init(&handle->write_queue);
handle->write_queue_size = 0;
- ev_init(&handle->read_watcher, oio__tcp_io);
+ ev_init(&handle->read_watcher, uv__tcp_io);
handle->read_watcher.data = handle;
- ev_init(&handle->write_watcher, oio__tcp_io);
+ ev_init(&handle->write_watcher, uv__tcp_io);
handle->write_watcher.data = handle;
assert(ngx_queue_empty(&handle->write_queue));
@@ -214,7 +214,7 @@ int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb,
}
-int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
+int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
int addrsize;
int domain;
int r;
@@ -222,11 +222,11 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
if (handle->fd <= 0) {
int fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
- oio_err_new(handle, errno);
+ uv_err_new(handle, errno);
return -1;
}
- if (oio_tcp_open(handle, fd)) {
+ if (uv_tcp_open(handle, fd)) {
close(fd);
return -2;
}
@@ -241,7 +241,7 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
addrsize = sizeof(struct sockaddr_in6);
domain = AF_INET6;
} else {
- oio_err_new(handle, EFAULT);
+ uv_err_new(handle, EFAULT);
return -1;
}
@@ -255,7 +255,7 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
return 0;
default:
- oio_err_new(handle, errno);
+ uv_err_new(handle, errno);
return -1;
}
}
@@ -264,7 +264,7 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
}
-int oio_tcp_open(oio_handle_t* handle, int fd) {
+int uv_tcp_open(uv_handle_t* handle, int fd) {
assert(fd >= 0);
handle->fd = fd;
@@ -281,24 +281,24 @@ int oio_tcp_open(oio_handle_t* handle, int fd) {
ev_io_set(&handle->read_watcher, fd, EV_READ);
ev_io_set(&handle->write_watcher, fd, EV_WRITE);
- /* These should have been set up by oio_tcp_init. */
+ /* These should have been set up by uv_tcp_init. */
assert(handle->next_watcher.data == handle);
assert(handle->write_watcher.data == handle);
assert(handle->read_watcher.data == handle);
- assert(handle->read_watcher.cb == oio__tcp_io);
- assert(handle->write_watcher.cb == oio__tcp_io);
+ assert(handle->read_watcher.cb == uv__tcp_io);
+ assert(handle->write_watcher.cb == uv__tcp_io);
return 0;
}
-void oio__server_io(EV_P_ ev_io* watcher, int revents) {
- oio_handle_t* handle = watcher->data;
+void uv__server_io(EV_P_ ev_io* watcher, int revents) {
+ uv_handle_t* handle = watcher->data;
assert(watcher == &handle->read_watcher ||
watcher == &handle->write_watcher);
assert(revents == EV_READ);
- assert(!oio_flag_is_set(handle, OIO_CLOSING));
+ assert(!uv_flag_is_set(handle, UV_CLOSING));
if (handle->accepted_fd >= 0) {
ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
@@ -320,15 +320,15 @@ void oio__server_io(EV_P_ ev_io* watcher, int revents) {
/* TODO special trick. unlock reserved socket, accept, close. */
return;
} else {
- oio_err_new(handle, errno);
- oio_close(handle);
+ uv_err_new(handle, errno);
+ uv_close(handle);
}
} else {
handle->accepted_fd = fd;
handle->accept_cb(handle);
if (handle->accepted_fd >= 0) {
- /* The user hasn't yet accepted called oio_accept() */
+ /* The user hasn't yet accepted called uv_accept() */
ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
return;
}
@@ -337,17 +337,17 @@ void oio__server_io(EV_P_ ev_io* watcher, int revents) {
}
-int oio_accept(oio_handle_t* server, oio_handle_t* client,
- oio_close_cb close_cb, void* data) {
+int uv_accept(uv_handle_t* server, uv_handle_t* client,
+ uv_close_cb close_cb, void* data) {
if (server->accepted_fd < 0) {
return -1;
}
- if (oio_tcp_init(client, close_cb, data)) {
+ if (uv_tcp_init(client, close_cb, data)) {
return -1;
}
- if (oio_tcp_open(client, server->accepted_fd)) {
+ if (uv_tcp_open(client, server->accepted_fd)) {
/* Ignore error for now */
server->accepted_fd = -1;
close(server->accepted_fd);
@@ -360,17 +360,17 @@ int oio_accept(oio_handle_t* server, oio_handle_t* client,
}
-int oio_listen(oio_handle_t* handle, int backlog, oio_accept_cb cb) {
+int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
assert(handle->fd >= 0);
if (handle->delayed_error) {
- oio_err_new(handle, handle->delayed_error);
+ uv_err_new(handle, handle->delayed_error);
return -1;
}
int r = listen(handle->fd, backlog);
if (r < 0) {
- oio_err_new(handle, errno);
+ uv_err_new(handle, errno);
return -1;
}
@@ -378,22 +378,22 @@ int oio_listen(oio_handle_t* handle, int backlog, oio_accept_cb cb) {
/* Start listening for connections. */
ev_io_set(&handle->read_watcher, handle->fd, EV_READ);
- ev_set_cb(&handle->read_watcher, oio__server_io);
+ ev_set_cb(&handle->read_watcher, uv__server_io);
ev_io_start(EV_DEFAULT_ &handle->read_watcher);
return 0;
}
-void oio__finish_close(oio_handle_t* handle) {
- assert(oio_flag_is_set(handle, OIO_CLOSING));
- assert(!oio_flag_is_set(handle, OIO_CLOSED));
- oio_flag_set(handle, OIO_CLOSED);
+void uv__finish_close(uv_handle_t* handle) {
+ assert(uv_flag_is_set(handle, UV_CLOSING));
+ assert(!uv_flag_is_set(handle, UV_CLOSED));
+ uv_flag_set(handle, UV_CLOSED);
switch (handle->type) {
- case OIO_TCP:
+ case UV_TCP:
/* XXX Is it necessary to stop these watchers here? weren't they
- * supposed to be stopped in oio_close()?
+ * supposed to be stopped in uv_close()?
*/
ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
@@ -410,19 +410,19 @@ void oio__finish_close(oio_handle_t* handle) {
}
break;
- case OIO_PREPARE:
+ case UV_PREPARE:
assert(!ev_is_active(&handle->prepare_watcher));
break;
- case OIO_CHECK:
+ case UV_CHECK:
assert(!ev_is_active(&handle->check_watcher));
break;
- case OIO_IDLE:
+ case UV_IDLE:
assert(!ev_is_active(&handle->idle_watcher));
break;
- case OIO_ASYNC:
+ case UV_ASYNC:
assert(!ev_is_active(&handle->async_watcher));
break;
}
@@ -435,7 +435,7 @@ void oio__finish_close(oio_handle_t* handle) {
}
-oio_req_t* oio_write_queue_head(oio_handle_t* handle) {
+uv_req_t* uv_write_queue_head(uv_handle_t* handle) {
if (ngx_queue_empty(&handle->write_queue)) {
return NULL;
}
@@ -445,74 +445,74 @@ oio_req_t* oio_write_queue_head(oio_handle_t* handle) {
return NULL;
}
- oio_req_t* req = ngx_queue_data(q, struct oio_req_s, queue);
+ uv_req_t* req = ngx_queue_data(q, struct uv_req_s, queue);
assert(req);
return req;
}
-void oio__next(EV_P_ ev_idle* watcher, int revents) {
- oio_handle_t* handle = watcher->data;
+void uv__next(EV_P_ ev_idle* watcher, int revents) {
+ uv_handle_t* handle = watcher->data;
assert(watcher == &handle->next_watcher);
assert(revents == EV_IDLE);
/* For now this function is only to handle the closing event, but we might
* put more stuff here later.
*/
- assert(oio_flag_is_set(handle, OIO_CLOSING));
- oio__finish_close(handle);
+ assert(uv_flag_is_set(handle, UV_CLOSING));
+ uv__finish_close(handle);
}
-static void oio__drain(oio_handle_t* handle) {
- assert(!oio_write_queue_head(handle));
+static void uv__drain(uv_handle_t* handle) {
+ assert(!uv_write_queue_head(handle));
assert(handle->write_queue_size == 0);
ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
/* Shutdown? */
- if (oio_flag_is_set(handle, OIO_SHUTTING) &&
- !oio_flag_is_set(handle, OIO_CLOSING) &&
- !oio_flag_is_set(handle, OIO_SHUT)) {
+ if (uv_flag_is_set(handle, UV_SHUTTING) &&
+ !uv_flag_is_set(handle, UV_CLOSING) &&
+ !uv_flag_is_set(handle, UV_SHUT)) {
assert(handle->shutdown_req);
- oio_req_t* req = handle->shutdown_req;
- oio_shutdown_cb cb = req->cb;
+ uv_req_t* req = handle->shutdown_req;
+ uv_shutdown_cb cb = req->cb;
if (shutdown(handle->fd, SHUT_WR)) {
/* Error. Nothing we can do, close the handle. */
- oio_err_new(handle, errno);
- oio_close(handle);
+ uv_err_new(handle, errno);
+ uv_close(handle);
if (cb) cb(req, -1);
} else {
- oio_err_new(handle, 0);
- oio_flag_set(handle, OIO_SHUT);
+ uv_err_new(handle, 0);
+ uv_flag_set(handle, UV_SHUT);
if (cb) cb(req, 0);
}
}
}
-void oio__write(oio_handle_t* handle) {
+void uv__write(uv_handle_t* handle) {
assert(handle->fd >= 0);
/* TODO: should probably while(1) here until EAGAIN */
/* Get the request at the head of the queue. */
- oio_req_t* req = oio_write_queue_head(handle);
+ uv_req_t* req = uv_write_queue_head(handle);
if (!req) {
assert(handle->write_queue_size == 0);
- oio__drain(handle);
+ uv__drain(handle);
return;
}
assert(req->handle == handle);
- /* Cast to iovec. We had to have our own oio_buf instead of iovec
+ /* Cast to iovec. We had to have our own uv_buf instead of iovec
* because Windows's WSABUF is not an iovec.
*/
- assert(sizeof(oio_buf) == sizeof(struct iovec));
+ assert(sizeof(uv_buf) == sizeof(struct iovec));
struct iovec* iov = (struct iovec*) &(req->bufs[req->write_index]);
int iovcnt = req->bufcnt - req->write_index;
@@ -522,14 +522,14 @@ void oio__write(oio_handle_t* handle) {
ssize_t n = writev(handle->fd, iov, iovcnt);
- oio_write_cb cb = req->cb;
+ uv_write_cb cb = req->cb;
if (n < 0) {
if (errno != EAGAIN) {
- oio_err_t err = oio_err_new(handle, errno);
+ uv_err_t err = uv_err_new(handle, errno);
/* XXX How do we handle the error? Need test coverage here. */
- oio_close(handle);
+ uv_close(handle);
if (cb) {
cb(req, -1);
@@ -541,7 +541,7 @@ void oio__write(oio_handle_t* handle) {
/* The loop updates the counters. */
while (n > 0) {
- oio_buf* buf = &(req->bufs[req->write_index]);
+ uv_buf* buf = &(req->bufs[req->write_index]);
size_t len = buf->len;
assert(req->write_index < req->bufcnt);
@@ -583,7 +583,7 @@ void oio__write(oio_handle_t* handle) {
assert(handle->write_queue_size > 0);
} else {
/* Write queue drained. */
- oio__drain(handle);
+ uv__drain(handle);
}
return;
@@ -601,13 +601,13 @@ void oio__write(oio_handle_t* handle) {
}
-void oio__read(oio_handle_t* handle) {
- /* XXX: Maybe instead of having OIO_READING we just test if
+void uv__read(uv_handle_t* handle) {
+ /* XXX: Maybe instead of having UV_READING we just test if
* handle->read_cb is NULL or not?
*/
- while (handle->read_cb && oio_flag_is_set(handle, OIO_READING)) {
+ while (handle->read_cb && uv_flag_is_set(handle, UV_READING)) {
assert(alloc_cb);
- oio_buf buf = alloc_cb(handle, 64 * 1024);
+ uv_buf buf = alloc_cb(handle, 64 * 1024);
assert(buf.len > 0);
assert(buf.base);
@@ -620,27 +620,27 @@ void oio__read(oio_handle_t* handle) {
/* Error */
if (errno == EAGAIN) {
/* Wait for the next one. */
- if (oio_flag_is_set(handle, OIO_READING)) {
+ if (uv_flag_is_set(handle, UV_READING)) {
ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
}
- oio_err_new(handle, EAGAIN);
+ uv_err_new(handle, EAGAIN);
handle->read_cb(handle, 0, buf);
return;
} else {
- oio_err_new(handle, errno);
- oio_close(handle);
+ uv_err_new(handle, errno);
+ uv_close(handle);
handle->read_cb(handle, -1, buf);
assert(!ev_is_active(&handle->read_watcher));
return;
}
} else if (nread == 0) {
/* EOF */
- oio_err_new_artificial(handle, OIO_EOF);
+ uv_err_new_artificial(handle, UV_EOF);
ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
handle->read_cb(handle, -1, buf);
- if (oio_flag_is_set(handle, OIO_SHUT)) {
- oio_close(handle);
+ if (uv_flag_is_set(handle, UV_SHUT)) {
+ uv_close(handle);
}
return;
} else {
@@ -651,20 +651,20 @@ void oio__read(oio_handle_t* handle) {
}
-int oio_shutdown(oio_req_t* req) {
- oio_handle_t* handle = req->handle;
+int uv_shutdown(uv_req_t* req) {
+ uv_handle_t* handle = req->handle;
assert(handle->fd >= 0);
- if (oio_flag_is_set(handle, OIO_SHUT) ||
- oio_flag_is_set(handle, OIO_CLOSED) ||
- oio_flag_is_set(handle, OIO_CLOSING)) {
+ if (uv_flag_is_set(handle, UV_SHUT) ||
+ uv_flag_is_set(handle, UV_CLOSED) ||
+ uv_flag_is_set(handle, UV_CLOSING)) {
return -1;
}
handle->shutdown_req = req;
- req->type = OIO_SHUTDOWN;
+ req->type = UV_SHUTDOWN;
- oio_flag_set(handle, OIO_SHUTTING);
+ uv_flag_set(handle, UV_SHUTTING);
ev_io_start(EV_DEFAULT_UC_ &handle->write_watcher);
@@ -672,24 +672,24 @@ int oio_shutdown(oio_req_t* req) {
}
-void oio__tcp_io(EV_P_ ev_io* watcher, int revents) {
- oio_handle_t* handle = watcher->data;
+void uv__tcp_io(EV_P_ ev_io* watcher, int revents) {
+ uv_handle_t* handle = watcher->data;
assert(watcher == &handle->read_watcher ||
watcher == &handle->write_watcher);
assert(handle->fd >= 0);
- assert(!oio_flag_is_set(handle, OIO_CLOSING));
+ assert(!uv_flag_is_set(handle, UV_CLOSING));
if (handle->connect_req) {
- oio__tcp_connect(handle);
+ uv__tcp_connect(handle);
} else {
if (revents & EV_READ) {
- oio__read(handle);
+ uv__read(handle);
}
if (revents & EV_WRITE) {
- oio__write(handle);
+ uv__write(handle);
}
}
}
@@ -700,13 +700,13 @@ void oio__tcp_io(EV_P_ ev_io* watcher, int revents) {
* In order to determine if we've errored out or succeeded, we must call
* getsockopt.
*/
-static void oio__tcp_connect(oio_handle_t* handle) {
+static void uv__tcp_connect(uv_handle_t* handle) {
int error;
socklen_t errorsize = sizeof(int);
assert(handle->fd >= 0);
- oio_req_t* req = handle->connect_req;
+ uv_req_t* req = handle->connect_req;
assert(req);
if (handle->delayed_error) {
@@ -726,7 +726,7 @@ static void oio__tcp_connect(oio_handle_t* handle) {
/* Successful connection */
handle->connect_req = NULL;
- oio_connect_cb connect_cb = req->cb;
+ uv_connect_cb connect_cb = req->cb;
if (connect_cb) {
connect_cb(req, 0);
}
@@ -736,46 +736,46 @@ static void oio__tcp_connect(oio_handle_t* handle) {
return;
} else {
/* Error */
- oio_err_t err = oio_err_new(handle, error);
+ uv_err_t err = uv_err_new(handle, error);
handle->connect_req = NULL;
- oio_connect_cb connect_cb = req->cb;
+ uv_connect_cb connect_cb = req->cb;
if (connect_cb) {
connect_cb(req, -1);
}
- oio_close(handle);
+ uv_close(handle);
}
}
-int oio_connect(oio_req_t* req, struct sockaddr* addr) {
- oio_handle_t* handle = req->handle;
+int uv_connect(uv_req_t* req, struct sockaddr* addr) {
+ uv_handle_t* handle = req->handle;
if (handle->fd <= 0) {
int fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
- oio_err_new(handle, errno);
+ uv_err_new(handle, errno);
return -1;
}
- if (oio_tcp_open(handle, fd)) {
+ if (uv_tcp_open(handle, fd)) {
close(fd);
return -2;
}
}
- req->type = OIO_CONNECT;
+ req->type = UV_CONNECT;
ngx_queue_init(&req->queue);
if (handle->connect_req) {
- oio_err_new(handle, EALREADY);
+ uv_err_new(handle, EALREADY);
return -1;
}
- if (handle->type != OIO_TCP) {
- oio_err_new(handle, ENOTSOCK);
+ if (handle->type != UV_TCP) {
+ uv_err_new(handle, ENOTSOCK);
return -1;
}
@@ -797,7 +797,7 @@ int oio_connect(oio_req_t* req, struct sockaddr* addr) {
break;
default:
- oio_err_new(handle, errno);
+ uv_err_new(handle, errno);
return -1;
}
}
@@ -813,7 +813,7 @@ int oio_connect(oio_req_t* req, struct sockaddr* addr) {
}
-static size_t oio__buf_count(oio_buf bufs[], int bufcnt) {
+static size_t uv__buf_count(uv_buf bufs[], int bufcnt) {
size_t total = 0;
int i;
@@ -826,28 +826,28 @@ static size_t oio__buf_count(oio_buf bufs[], int bufcnt) {
/* The buffers to be written must remain valid until the callback is called.
- * This is not required for the oio_buf array.
+ * This is not required for the uv_buf array.
*/
-int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
- oio_handle_t* handle = req->handle;
+int uv_write(uv_req_t* req, uv_buf bufs[], int bufcnt) {
+ uv_handle_t* handle = req->handle;
assert(handle->fd >= 0);
ngx_queue_init(&req->queue);
- req->type = OIO_WRITE;
+ req->type = UV_WRITE;
/* TODO: Don't malloc for each write... */
- req->bufs = malloc(sizeof(oio_buf) * bufcnt);
- memcpy(req->bufs, bufs, bufcnt * sizeof(oio_buf));
+ req->bufs = malloc(sizeof(uv_buf) * bufcnt);
+ memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf));
req->bufcnt = bufcnt;
req->write_index = 0;
- handle->write_queue_size += oio__buf_count(bufs, bufcnt);
+ handle->write_queue_size += uv__buf_count(bufs, bufcnt);
/* Append the request to write_queue. */
ngx_queue_insert_tail(&handle->write_queue, &req->queue);
assert(!ngx_queue_empty(&handle->write_queue));
- assert(handle->write_watcher.cb == oio__tcp_io);
+ assert(handle->write_watcher.cb == uv__tcp_io);
assert(handle->write_watcher.data == handle);
assert(handle->write_watcher.fd == handle->fd);
@@ -857,18 +857,18 @@ int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
}
-void oio_ref() {
+void uv_ref() {
ev_ref(EV_DEFAULT_UC);
}
-void oio_unref() {
+void uv_unref() {
ev_unref(EV_DEFAULT_UC);
}
-void oio__timeout(EV_P_ ev_timer* watcher, int revents) {
- oio_req_t* req = watcher->data;
+void uv__timeout(EV_P_ ev_timer* watcher, int revents) {
+ uv_req_t* req = watcher->data;
assert(watcher == &req->timer);
assert(EV_TIMER & revents);
@@ -877,36 +877,36 @@ void oio__timeout(EV_P_ ev_timer* watcher, int revents) {
assert(!ev_is_pending(watcher));
if (req->cb) {
- oio_timer_cb cb = req->cb;
+ uv_timer_cb cb = req->cb;
/* TODO skew */
cb(req, 0, 0);
}
}
-void oio_update_time() {
+void uv_update_time() {
ev_now_update(EV_DEFAULT_UC);
}
-int64_t oio_now() {
+int64_t uv_now() {
return (int64_t)(ev_now(EV_DEFAULT_UC) * 1000);
}
-int oio_timeout(oio_req_t* req, int64_t timeout) {
- ev_timer_init(&req->timer, oio__timeout, timeout / 1000.0, 0.0);
+int uv_timeout(uv_req_t* req, int64_t timeout) {
+ ev_timer_init(&req->timer, uv__timeout, timeout / 1000.0, 0.0);
ev_timer_start(EV_DEFAULT_UC_ &req->timer);
req->timer.data = req;
return 0;
}
-int oio_read_start(oio_handle_t* handle, oio_read_cb cb) {
- /* The OIO_READING flag is irrelevant of the state of the handle - it just
+int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
+ /* The UV_READING flag is irrelevant of the state of the handle - it just
* expresses the desired state of the user.
*/
- oio_flag_set(handle, OIO_READING);
+ uv_flag_set(handle, UV_READING);
/* TODO: try to do the read inline? */
/* TODO: keep track of handle state. If we've gotten an EOF then we should
@@ -915,17 +915,17 @@ int oio_read_start(oio_handle_t* handle, oio_read_cb cb) {
assert(handle->fd >= 0);
handle->read_cb = cb;
- /* These should have been set by oio_tcp_init. */
+ /* These should have been set by uv_tcp_init. */
assert(handle->read_watcher.data == handle);
- assert(handle->read_watcher.cb == oio__tcp_io);
+ assert(handle->read_watcher.cb == uv__tcp_io);
ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
return 0;
}
-int oio_read_stop(oio_handle_t* handle) {
- oio_flag_unset(handle, OIO_READING);
+int uv_read_stop(uv_handle_t* handle) {
+ uv_flag_unset(handle, UV_READING);
ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
handle->read_cb = NULL;
@@ -933,32 +933,32 @@ int oio_read_stop(oio_handle_t* handle) {
}
-void oio_free(oio_handle_t* handle) {
+void uv_free(uv_handle_t* handle) {
free(handle);
/* lists? */
return;
}
-void oio_req_init(oio_req_t* req, oio_handle_t* handle, void* cb) {
- req->type = OIO_UNKNOWN_REQ;
+void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
+ req->type = UV_UNKNOWN_REQ;
req->cb = cb;
req->handle = handle;
ngx_queue_init(&req->queue);
}
-static void oio__prepare(EV_P_ ev_prepare* w, int revents) {
- oio_handle_t* handle = (oio_handle_t*)(w->data);
+static void uv__prepare(EV_P_ ev_prepare* w, int revents) {
+ uv_handle_t* handle = (uv_handle_t*)(w->data);
if (handle->prepare_cb) handle->prepare_cb(handle, 0);
}
-int oio_prepare_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- oio__handle_init(handle, OIO_PREPARE, close_cb, data);
+int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ uv__handle_init(handle, UV_PREPARE, close_cb, data);
- ev_prepare_init(&handle->prepare_watcher, oio__prepare);
+ ev_prepare_init(&handle->prepare_watcher, uv__prepare);
handle->prepare_watcher.data = handle;
handle->prepare_cb = NULL;
@@ -967,31 +967,31 @@ int oio_prepare_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
}
-int oio_prepare_start(oio_handle_t* handle, oio_loop_cb cb) {
+int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb) {
handle->prepare_cb = cb;
ev_prepare_start(EV_DEFAULT_UC_ &handle->prepare_watcher);
return 0;
}
-int oio_prepare_stop(oio_handle_t* handle) {
+int uv_prepare_stop(uv_handle_t* handle) {
ev_prepare_stop(EV_DEFAULT_UC_ &handle->prepare_watcher);
return 0;
}
-static void oio__check(EV_P_ ev_check* w, int revents) {
- oio_handle_t* handle = (oio_handle_t*)(w->data);
+static void uv__check(EV_P_ ev_check* w, int revents) {
+ uv_handle_t* handle = (uv_handle_t*)(w->data);
if (handle->check_cb) handle->check_cb(handle, 0);
}
-int oio_check_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- oio__handle_init(handle, OIO_CHECK, close_cb, data);
+int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ uv__handle_init(handle, UV_CHECK, close_cb, data);
- ev_check_init(&handle->check_watcher, oio__check);
+ ev_check_init(&handle->check_watcher, uv__check);
handle->check_watcher.data = handle;
handle->check_cb = NULL;
@@ -1000,31 +1000,31 @@ int oio_check_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
}
-int oio_check_start(oio_handle_t* handle, oio_loop_cb cb) {
+int uv_check_start(uv_handle_t* handle, uv_loop_cb cb) {
handle->check_cb = cb;
ev_check_start(EV_DEFAULT_UC_ &handle->check_watcher);
return 0;
}
-int oio_check_stop(oio_handle_t* handle) {
+int uv_check_stop(uv_handle_t* handle) {
ev_prepare_stop(EV_DEFAULT_UC_ &handle->prepare_watcher);
return 0;
}
-static void oio__idle(EV_P_ ev_idle* w, int revents) {
- oio_handle_t* handle = (oio_handle_t*)(w->data);
+static void uv__idle(EV_P_ ev_idle* w, int revents) {
+ uv_handle_t* handle = (uv_handle_t*)(w->data);
if (handle->idle_cb) handle->idle_cb(handle, 0);
}
-int oio_idle_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- oio__handle_init(handle, OIO_IDLE, close_cb, data);
+int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ uv__handle_init(handle, UV_IDLE, close_cb, data);
- ev_idle_init(&handle->idle_watcher, oio__idle);
+ ev_idle_init(&handle->idle_watcher, uv__idle);
handle->idle_watcher.data = handle;
handle->idle_cb = NULL;
@@ -1033,31 +1033,31 @@ int oio_idle_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
}
-int oio_idle_start(oio_handle_t* handle, oio_loop_cb cb) {
+int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) {
handle->idle_cb = cb;
ev_idle_start(EV_DEFAULT_UC_ &handle->idle_watcher);
return 0;
}
-int oio_idle_stop(oio_handle_t* handle) {
+int uv_idle_stop(uv_handle_t* handle) {
ev_idle_stop(EV_DEFAULT_UC_ &handle->idle_watcher);
return 0;
}
-static void oio__async(EV_P_ ev_async* w, int revents) {
- oio_handle_t* handle = (oio_handle_t*)(w->data);
+static void uv__async(EV_P_ ev_async* w, int revents) {
+ uv_handle_t* handle = (uv_handle_t*)(w->data);
if (handle->async_cb) handle->async_cb(handle, 0);
}
-int oio_async_init(oio_handle_t* handle, oio_async_cb async_cb,
- oio_close_cb close_cb, void* data) {
- oio__handle_init(handle, OIO_ASYNC, close_cb, data);
+int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
+ uv_close_cb close_cb, void* data) {
+ uv__handle_init(handle, UV_ASYNC, close_cb, data);
- ev_async_init(&handle->async_watcher, oio__async);
+ ev_async_init(&handle->async_watcher, uv__async);
handle->async_watcher.data = handle;
handle->async_cb = async_cb;
@@ -1069,6 +1069,6 @@ int oio_async_init(oio_handle_t* handle, oio_async_cb async_cb,
}
-int oio_async_send(oio_handle_t* handle) {
+int uv_async_send(uv_handle_t* handle) {
ev_async_send(EV_DEFAULT_UC_ &handle->async_watcher);
}
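uv-unix.c above keeps the "last error" model: a failing call records a uv_err_t that uv_last_error() returns and uv_strerror() formats, with artificial codes such as UV_EOF for end-of-stream. A small sketch (not part of the patch) of how a caller is expected to consume it, following the read_cb/write_cb usage in the tests; the function name is illustrative.

#include "uv.h"
#include <stdio.h>

/* Print a readable description of the most recent libuv error. */
static void report_last_error(const char* what) {
  uv_err_t err = uv_last_error();

  if (err.code == UV_EOF) {
    /* Artificial code: read_cb sees nread == -1 with UV_EOF at end of stream. */
    fprintf(stderr, "%s: connection closed by peer\n", what);
  } else {
    fprintf(stderr, "%s: %s\n", what, uv_strerror(err));
  }
}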
diff --git a/oio-unix.h b/uv-unix.h
similarity index 79%
rename from oio-unix.h
rename to uv-unix.h
index 98ef17e5..b247e583 100644
--- a/oio-unix.h
+++ b/uv-unix.h
@@ -19,8 +19,8 @@
* IN THE SOFTWARE.
*/
-#ifndef OIO_UNIX_H
-#define OIO_UNIX_H
+#ifndef UV_UNIX_H
+#define UV_UNIX_H
#include "ngx-queue.h"
@@ -35,44 +35,44 @@
typedef struct {
char* base;
size_t len;
-} oio_buf;
+} uv_buf;
-#define oio_req_private_fields \
+#define uv_req_private_fields \
int write_index; \
ev_timer timer; \
ngx_queue_t queue; \
- oio_buf* bufs; \
+ uv_buf* bufs; \
int bufcnt;
/* TODO: union or classes please! */
-#define oio_handle_private_fields \
+#define uv_handle_private_fields \
int fd; \
int flags; \
ev_idle next_watcher; \
-/* OIO_TCP */ \
+/* UV_TCP */ \
int delayed_error; \
- oio_read_cb read_cb; \
- oio_accept_cb accept_cb; \
+ uv_read_cb read_cb; \
+ uv_accept_cb accept_cb; \
int accepted_fd; \
- oio_req_t *connect_req; \
- oio_req_t *shutdown_req; \
+ uv_req_t *connect_req; \
+ uv_req_t *shutdown_req; \
ev_io read_watcher; \
ev_io write_watcher; \
ngx_queue_t write_queue; \
-/* OIO_PREPARE */ \
+/* UV_PREPARE */ \
ev_prepare prepare_watcher; \
- oio_loop_cb prepare_cb; \
-/* OIO_CHECK */ \
+ uv_loop_cb prepare_cb; \
+/* UV_CHECK */ \
ev_check check_watcher; \
- oio_loop_cb check_cb; \
-/* OIO_IDLE */ \
+ uv_loop_cb check_cb; \
+/* UV_IDLE */ \
ev_idle idle_watcher; \
- oio_loop_cb idle_cb; \
-/* OIO_ASYNC */ \
+ uv_loop_cb idle_cb; \
+/* UV_ASYNC */ \
ev_async async_watcher; \
- oio_loop_cb async_cb;
+ uv_loop_cb async_cb;
-#endif /* OIO_UNIX_H */
+#endif /* UV_UNIX_H */
diff --git a/oio-win.c b/uv-win.c
similarity index 53%
rename from oio-win.c
rename to uv-win.c
index a6fe6686..fc406517 100644
--- a/oio-win.c
+++ b/uv-win.c
@@ -25,7 +25,7 @@
#include
#include
-#include "oio.h"
+#include "uv.h"
#include "tree.h"
/*
@@ -117,85 +117,85 @@ static LPFN_TRANSMITFILE pTransmitFile;
/*
- * Private oio_handle flags
+ * Private uv_handle flags
*/
-#define OIO_HANDLE_CLOSING 0x0001
-#define OIO_HANDLE_CLOSED 0x0002
-#define OIO_HANDLE_BOUND 0x0004
-#define OIO_HANDLE_LISTENING 0x0008
-#define OIO_HANDLE_CONNECTION 0x0010
-#define OIO_HANDLE_CONNECTED 0x0020
-#define OIO_HANDLE_READING 0x0040
-#define OIO_HANDLE_ACTIVE 0x0040
-#define OIO_HANDLE_EOF 0x0080
-#define OIO_HANDLE_SHUTTING 0x0100
-#define OIO_HANDLE_SHUT 0x0200
-#define OIO_HANDLE_ENDGAME_QUEUED 0x0400
-#define OIO_HANDLE_BIND_ERROR 0x1000
+#define UV_HANDLE_CLOSING 0x0001
+#define UV_HANDLE_CLOSED 0x0002
+#define UV_HANDLE_BOUND 0x0004
+#define UV_HANDLE_LISTENING 0x0008
+#define UV_HANDLE_CONNECTION 0x0010
+#define UV_HANDLE_CONNECTED 0x0020
+#define UV_HANDLE_READING 0x0040
+#define UV_HANDLE_ACTIVE 0x0040
+#define UV_HANDLE_EOF 0x0080
+#define UV_HANDLE_SHUTTING 0x0100
+#define UV_HANDLE_SHUT 0x0200
+#define UV_HANDLE_ENDGAME_QUEUED 0x0400
+#define UV_HANDLE_BIND_ERROR 0x1000
/*
- * Private oio_req flags.
+ * Private uv_req flags.
*/
/* The request is currently queued. */
-#define OIO_REQ_PENDING 0x01
+#define UV_REQ_PENDING 0x01
/* Binary tree used to keep the list of timers sorted. */
-static int oio_timer_compare(oio_req_t* t1, oio_req_t* t2);
-RB_HEAD(oio_timer_s, oio_req_s);
-RB_PROTOTYPE_STATIC(oio_timer_s, oio_req_s, tree_entry, oio_timer_compare);
+static int uv_timer_compare(uv_req_t* t1, uv_req_t* t2);
+RB_HEAD(uv_timer_s, uv_req_s);
+RB_PROTOTYPE_STATIC(uv_timer_s, uv_req_s, tree_entry, uv_timer_compare);
/* The head of the timers tree */
-static struct oio_timer_s oio_timers_ = RB_INITIALIZER(oio_timers_);
+static struct uv_timer_s uv_timers_ = RB_INITIALIZER(uv_timers_);
-/* Lists of active oio_prepare / oio_check / oio_idle watchers */
-static oio_handle_t* oio_prepare_handles_ = NULL;
-static oio_handle_t* oio_check_handles_ = NULL;
-static oio_handle_t* oio_idle_handles_ = NULL;
+/* Lists of active uv_prepare / uv_check / uv_idle watchers */
+static uv_handle_t* uv_prepare_handles_ = NULL;
+static uv_handle_t* uv_check_handles_ = NULL;
+static uv_handle_t* uv_idle_handles_ = NULL;
/* This pointer will refer to the prepare/check/idle handle whose callback */
/* is scheduled to be called next. This is needed to allow safe removal */
/* from one of the lists above while that list is being iterated. */
-static oio_handle_t* oio_next_loop_handle_ = NULL;
+static uv_handle_t* uv_next_loop_handle_ = NULL;
/* Head of a single-linked list of closed handles */
-static oio_handle_t* oio_endgame_handles_ = NULL;
+static uv_handle_t* uv_endgame_handles_ = NULL;
/* The current time according to the event loop, in msecs. */
-static int64_t oio_now_ = 0;
-static int64_t oio_ticks_per_msec_ = 0;
+static int64_t uv_now_ = 0;
+static int64_t uv_ticks_per_msec_ = 0;
/*
* Global I/O completion port
*/
-static HANDLE oio_iocp_;
+static HANDLE uv_iocp_;
/* Global error code */
-static const oio_err_t oio_ok_ = { OIO_OK, ERROR_SUCCESS };
-static oio_err_t oio_last_error_ = { OIO_OK, ERROR_SUCCESS };
+static const uv_err_t uv_ok_ = { UV_OK, ERROR_SUCCESS };
+static uv_err_t uv_last_error_ = { UV_OK, ERROR_SUCCESS };
/* Error message string */
-static char* oio_err_str_ = NULL;
+static char* uv_err_str_ = NULL;
/* Global alloc function */
-oio_alloc_cb oio_alloc_ = NULL;
+uv_alloc_cb uv_alloc_ = NULL;
/* Reference count that keeps the event loop alive */
-static int oio_refs_ = 0;
+static int uv_refs_ = 0;
/* Ip address used to bind to any port at any interface */
-static struct sockaddr_in oio_addr_ip4_any_;
+static struct sockaddr_in uv_addr_ip4_any_;
-/* A zero-size buffer for use by oio_read */
-static char oio_zero_[] = "";
+/* A zero-size buffer for use by uv_read */
+static char uv_zero_[] = "";
/* Atomic set operation on char */
@@ -207,14 +207,14 @@ static char oio_zero_[] = "";
/* target to be aligned. */
#pragma intrinsic(_InterlockedOr8)
-static char __declspec(inline) oio_atomic_exchange_set(char volatile* target) {
+static char __declspec(inline) uv_atomic_exchange_set(char volatile* target) {
return _InterlockedOr8(target, 1);
}
#else /* GCC */
/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
-static inline char oio_atomic_exchange_set(char volatile* target) {
+static inline char uv_atomic_exchange_set(char volatile* target) {
const char one = 1;
char old_value;
__asm__ __volatile__ ("lock xchgb %0, %1\n\t"
@@ -230,7 +230,7 @@ static inline char oio_atomic_exchange_set(char volatile* target) {
/*
* Display an error message and abort the event loop.
*/
-static void oio_fatal_error(const int errorno, const char* syscall) {
+static void uv_fatal_error(const int errorno, const char* syscall) {
char* buf = NULL;
const char* errmsg;
@@ -261,68 +261,68 @@ static void oio_fatal_error(const int errorno, const char* syscall) {
}
-oio_err_t oio_last_error() {
- return oio_last_error_;
+uv_err_t uv_last_error() {
+ return uv_last_error_;
}
-char* oio_strerror(oio_err_t err) {
- if (oio_err_str_ != NULL) {
- LocalFree((void*) oio_err_str_);
+char* uv_strerror(uv_err_t err) {
+ if (uv_err_str_ != NULL) {
+ LocalFree((void*) uv_err_str_);
}
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err.sys_errno_,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&oio_err_str_, 0, NULL);
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&uv_err_str_, 0, NULL);
- if (oio_err_str_) {
- return oio_err_str_;
+ if (uv_err_str_) {
+ return uv_err_str_;
} else {
return "Unknown error";
}
}
-static oio_err_code oio_translate_sys_error(int sys_errno) {
+static uv_err_code uv_translate_sys_error(int sys_errno) {
switch (sys_errno) {
- case ERROR_SUCCESS: return OIO_OK;
- case ERROR_NOACCESS: return OIO_EACCESS;
- case WSAEACCES: return OIO_EACCESS;
- case ERROR_ADDRESS_ALREADY_ASSOCIATED: return OIO_EADDRINUSE;
- case WSAEADDRINUSE: return OIO_EADDRINUSE;
- case WSAEADDRNOTAVAIL: return OIO_EADDRNOTAVAIL;
- case WSAEWOULDBLOCK: return OIO_EAGAIN;
- case WSAEALREADY: return OIO_EALREADY;
- case ERROR_CONNECTION_REFUSED: return OIO_ECONNREFUSED;
- case WSAECONNREFUSED: return OIO_ECONNREFUSED;
- case WSAEFAULT: return OIO_EFAULT;
- case WSAEINVAL: return OIO_EINVAL;
- case ERROR_TOO_MANY_OPEN_FILES: return OIO_EMFILE;
- case WSAEMFILE: return OIO_EMFILE;
- case ERROR_OUTOFMEMORY: return OIO_ENOMEM;
- default: return OIO_UNKNOWN;
+ case ERROR_SUCCESS: return UV_OK;
+ case ERROR_NOACCESS: return UV_EACCESS;
+ case WSAEACCES: return UV_EACCESS;
+ case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
+ case WSAEADDRINUSE: return UV_EADDRINUSE;
+ case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
+ case WSAEWOULDBLOCK: return UV_EAGAIN;
+ case WSAEALREADY: return UV_EALREADY;
+ case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED;
+ case WSAECONNREFUSED: return UV_ECONNREFUSED;
+ case WSAEFAULT: return UV_EFAULT;
+ case WSAEINVAL: return UV_EINVAL;
+ case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE;
+ case WSAEMFILE: return UV_EMFILE;
+ case ERROR_OUTOFMEMORY: return UV_ENOMEM;
+ default: return UV_UNKNOWN;
}
}
-static oio_err_t oio_new_sys_error(int sys_errno) {
- oio_err_t e;
- e.code = oio_translate_sys_error(sys_errno);
+static uv_err_t uv_new_sys_error(int sys_errno) {
+ uv_err_t e;
+ e.code = uv_translate_sys_error(sys_errno);
e.sys_errno_ = sys_errno;
return e;
}
-static void oio_set_sys_error(int sys_errno) {
- oio_last_error_.code = oio_translate_sys_error(sys_errno);
- oio_last_error_.sys_errno_ = sys_errno;
+static void uv_set_sys_error(int sys_errno) {
+ uv_last_error_.code = uv_translate_sys_error(sys_errno);
+ uv_last_error_.sys_errno_ = sys_errno;
}
/*
* Retrieves the pointer to a winsock extension function.
*/
-static void oio_get_extension_function(SOCKET socket, GUID guid,
+static void uv_get_extension_function(SOCKET socket, GUID guid,
void **target) {
DWORD result, bytes;
@@ -338,13 +338,13 @@ static void oio_get_extension_function(SOCKET socket, GUID guid,
if (result == SOCKET_ERROR) {
*target = NULL;
- oio_fatal_error(WSAGetLastError(),
+ uv_fatal_error(WSAGetLastError(),
"WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER)");
}
}
-void oio_init(oio_alloc_cb alloc_cb) {
+void uv_init(uv_alloc_cb alloc_cb) {
const GUID wsaid_connectex = WSAID_CONNECTEX;
const GUID wsaid_acceptex = WSAID_ACCEPTEX;
const GUID wsaid_getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
@@ -356,72 +356,72 @@ void oio_init(oio_alloc_cb alloc_cb) {
LARGE_INTEGER timer_frequency;
SOCKET dummy;
- oio_alloc_ = alloc_cb;
+ uv_alloc_ = alloc_cb;
/* Initialize winsock */
errorno = WSAStartup(MAKEWORD(2, 2), &wsa_data);
if (errorno != 0) {
- oio_fatal_error(errorno, "WSAStartup");
+ uv_fatal_error(errorno, "WSAStartup");
}
/* Set implicit binding address used by connectEx */
- oio_addr_ip4_any_ = oio_ip4_addr("0.0.0.0", 0);
+ uv_addr_ip4_any_ = uv_ip4_addr("0.0.0.0", 0);
/* Retrieve the needed winsock extension function pointers. */
dummy = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
if (dummy == INVALID_SOCKET) {
- oio_fatal_error(WSAGetLastError(), "socket");
+ uv_fatal_error(WSAGetLastError(), "socket");
}
- oio_get_extension_function(dummy,
+ uv_get_extension_function(dummy,
wsaid_connectex,
(void**)&pConnectEx);
- oio_get_extension_function(dummy,
+ uv_get_extension_function(dummy,
wsaid_acceptex,
(void**)&pAcceptEx);
- oio_get_extension_function(dummy,
+ uv_get_extension_function(dummy,
wsaid_getacceptexsockaddrs,
(void**)&pGetAcceptExSockAddrs);
- oio_get_extension_function(dummy,
+ uv_get_extension_function(dummy,
wsaid_disconnectex,
(void**)&pDisconnectEx);
- oio_get_extension_function(dummy,
+ uv_get_extension_function(dummy,
wsaid_transmitfile,
(void**)&pTransmitFile);
if (closesocket(dummy) == SOCKET_ERROR) {
- oio_fatal_error(WSAGetLastError(), "closesocket");
+ uv_fatal_error(WSAGetLastError(), "closesocket");
}
/* Create an I/O completion port */
- oio_iocp_ = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
- if (oio_iocp_ == NULL) {
- oio_fatal_error(GetLastError(), "CreateIoCompletionPort");
+ uv_iocp_ = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
+ if (uv_iocp_ == NULL) {
+ uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
}
/* Initialize the event loop time */
if (!QueryPerformanceFrequency(&timer_frequency))
- oio_fatal_error(GetLastError(), "QueryPerformanceFrequency");
- oio_ticks_per_msec_ = timer_frequency.QuadPart / 1000;
+ uv_fatal_error(GetLastError(), "QueryPerformanceFrequency");
+ uv_ticks_per_msec_ = timer_frequency.QuadPart / 1000;
- oio_update_time();
+ uv_update_time();
}
-void oio_req_init(oio_req_t* req, oio_handle_t* handle, void* cb) {
- req->type = OIO_UNKNOWN_REQ;
+void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
+ req->type = UV_UNKNOWN_REQ;
req->flags = 0;
req->handle = handle;
req->cb = cb;
}
-static oio_req_t* oio_overlapped_to_req(OVERLAPPED* overlapped) {
- return CONTAINING_RECORD(overlapped, oio_req_t, overlapped);
+static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
+ return CONTAINING_RECORD(overlapped, uv_req_t, overlapped);
}
-static int oio_tcp_init_socket(oio_handle_t* handle, oio_close_cb close_cb,
+static int uv_tcp_init_socket(uv_handle_t* handle, uv_close_cb close_cb,
void* data, SOCKET socket) {
DWORD yes = 1;
@@ -429,58 +429,58 @@ static int oio_tcp_init_socket(oio_handle_t* handle, oio_close_cb close_cb,
handle->close_cb = close_cb;
handle->data = data;
handle->write_queue_size = 0;
- handle->type = OIO_TCP;
+ handle->type = UV_TCP;
handle->flags = 0;
handle->reqs_pending = 0;
- handle->error = oio_ok_;
+ handle->error = uv_ok_;
handle->accept_socket = INVALID_SOCKET;
/* Set the socket to nonblocking mode */
if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
return -1;
}
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
- oio_set_sys_error(GetLastError());
+ uv_set_sys_error(GetLastError());
return -1;
}
/* Associate it with the I/O completion port. */
- /* Use oio_handle_t pointer as completion key. */
+ /* Use the socket as the completion key. */
if (CreateIoCompletionPort((HANDLE)socket,
- oio_iocp_,
+ uv_iocp_,
(ULONG_PTR)socket,
0) == NULL) {
- oio_set_sys_error(GetLastError());
+ uv_set_sys_error(GetLastError());
return -1;
}
- oio_refs_++;
+ uv_refs_++;
return 0;
}
-static void oio_tcp_init_connection(oio_handle_t* handle) {
- handle->flags |= OIO_HANDLE_CONNECTION;
+static void uv_tcp_init_connection(uv_handle_t* handle) {
+ handle->flags |= UV_HANDLE_CONNECTION;
handle->write_reqs_pending = 0;
- oio_req_init(&(handle->read_req), handle, NULL);
+ uv_req_init(&(handle->read_req), handle, NULL);
}
-int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb,
+int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb,
void* data) {
SOCKET sock;
sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == INVALID_SOCKET) {
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
return -1;
}
- if (oio_tcp_init_socket(handle, close_cb, data, sock) == -1) {
+ if (uv_tcp_init_socket(handle, close_cb, data, sock) == -1) {
closesocket(sock);
return -1;
}
@@ -489,106 +489,106 @@ int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb,
}
-static void oio_tcp_endgame(oio_handle_t* handle) {
- oio_err_t err;
+static void uv_tcp_endgame(uv_handle_t* handle) {
+ uv_err_t err;
int status;
- if (handle->flags & OIO_HANDLE_SHUTTING &&
- !(handle->flags & OIO_HANDLE_SHUT) &&
+ if (handle->flags & UV_HANDLE_SHUTTING &&
+ !(handle->flags & UV_HANDLE_SHUT) &&
handle->write_reqs_pending == 0) {
if (shutdown(handle->socket, SD_SEND) != SOCKET_ERROR) {
status = 0;
- handle->flags |= OIO_HANDLE_SHUT;
+ handle->flags |= UV_HANDLE_SHUT;
} else {
status = -1;
- err = oio_new_sys_error(WSAGetLastError());
+ err = uv_new_sys_error(WSAGetLastError());
}
if (handle->shutdown_req->cb) {
- handle->shutdown_req->flags &= ~OIO_REQ_PENDING;
+ handle->shutdown_req->flags &= ~UV_REQ_PENDING;
if (status == -1) {
- oio_last_error_ = err;
+ uv_last_error_ = err;
}
- ((oio_shutdown_cb)handle->shutdown_req->cb)(handle->shutdown_req, status);
+ ((uv_shutdown_cb)handle->shutdown_req->cb)(handle->shutdown_req, status);
}
handle->reqs_pending--;
}
- if (handle->flags & OIO_HANDLE_EOF &&
- handle->flags & OIO_HANDLE_SHUT &&
- !(handle->flags & OIO_HANDLE_CLOSING)) {
- /* Because oio_close will add the handle to the endgame_handles list, */
+ if (handle->flags & UV_HANDLE_EOF &&
+ handle->flags & UV_HANDLE_SHUT &&
+ !(handle->flags & UV_HANDLE_CLOSING)) {
+ /* Because uv_close will add the handle to the endgame_handles list, */
/* return here and call the close cb the next time. */
- oio_close(handle);
+ uv_close(handle);
return;
}
- if (handle->flags & OIO_HANDLE_CLOSING &&
+ if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
- assert(!(handle->flags & OIO_HANDLE_CLOSED));
- handle->flags |= OIO_HANDLE_CLOSED;
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
- oio_last_error_ = handle->error;
- handle->close_cb(handle, handle->error.code == OIO_OK ? 0 : 1);
+ uv_last_error_ = handle->error;
+ handle->close_cb(handle, handle->error.code == UV_OK ? 0 : 1);
}
- oio_refs_--;
+ uv_refs_--;
}
}
-static void oio_loop_endgame(oio_handle_t* handle) {
- if (handle->flags & OIO_HANDLE_CLOSING) {
- assert(!(handle->flags & OIO_HANDLE_CLOSED));
- handle->flags |= OIO_HANDLE_CLOSED;
+static void uv_loop_endgame(uv_handle_t* handle) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb(handle, 0);
}
- oio_refs_--;
+ uv_refs_--;
}
}
-static void oio_async_endgame(oio_handle_t* handle) {
- if (handle->flags & OIO_HANDLE_CLOSING &&
+static void uv_async_endgame(uv_handle_t* handle) {
+ if (handle->flags & UV_HANDLE_CLOSING &&
!handle->async_sent) {
- assert(!(handle->flags & OIO_HANDLE_CLOSED));
- handle->flags |= OIO_HANDLE_CLOSED;
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb(handle, 0);
}
- oio_refs_--;
+ uv_refs_--;
}
}
-static void oio_call_endgames() {
- oio_handle_t* handle;
+static void uv_call_endgames() {
+ uv_handle_t* handle;
- while (oio_endgame_handles_) {
- handle = oio_endgame_handles_;
- oio_endgame_handles_ = handle->endgame_next;
+ while (uv_endgame_handles_) {
+ handle = uv_endgame_handles_;
+ uv_endgame_handles_ = handle->endgame_next;
- handle->flags &= ~OIO_HANDLE_ENDGAME_QUEUED;
+ handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED;
switch (handle->type) {
- case OIO_TCP:
- oio_tcp_endgame(handle);
+ case UV_TCP:
+ uv_tcp_endgame(handle);
break;
- case OIO_PREPARE:
- case OIO_CHECK:
- case OIO_IDLE:
- oio_loop_endgame(handle);
+ case UV_PREPARE:
+ case UV_CHECK:
+ case UV_IDLE:
+ uv_loop_endgame(handle);
break;
- case OIO_ASYNC:
- oio_async_endgame(handle);
+ case UV_ASYNC:
+ uv_async_endgame(handle);
break;
default:
@@ -599,51 +599,51 @@ static void oio_call_endgames() {
}
-static void oio_want_endgame(oio_handle_t* handle) {
- if (!(handle->flags & OIO_HANDLE_ENDGAME_QUEUED)) {
- handle->flags |= OIO_HANDLE_ENDGAME_QUEUED;
+static void uv_want_endgame(uv_handle_t* handle) {
+ if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
+ handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
- handle->endgame_next = oio_endgame_handles_;
- oio_endgame_handles_ = handle;
+ handle->endgame_next = uv_endgame_handles_;
+ uv_endgame_handles_ = handle;
}
}
-static int oio_close_error(oio_handle_t* handle, oio_err_t e) {
- if (handle->flags & OIO_HANDLE_CLOSING) {
+static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
return 0;
}
handle->error = e;
- handle->flags |= OIO_HANDLE_CLOSING;
+ handle->flags |= UV_HANDLE_CLOSING;
/* Handle-specific close actions */
switch (handle->type) {
- case OIO_TCP:
+ case UV_TCP:
closesocket(handle->socket);
if (handle->reqs_pending == 0) {
- oio_want_endgame(handle);
+ uv_want_endgame(handle);
}
return 0;
- case OIO_PREPARE:
- oio_prepare_stop(handle);
- oio_want_endgame(handle);
+ case UV_PREPARE:
+ uv_prepare_stop(handle);
+ uv_want_endgame(handle);
return 0;
- case OIO_CHECK:
- oio_check_stop(handle);
- oio_want_endgame(handle);
+ case UV_CHECK:
+ uv_check_stop(handle);
+ uv_want_endgame(handle);
return 0;
- case OIO_IDLE:
- oio_idle_stop(handle);
- oio_want_endgame(handle);
+ case UV_IDLE:
+ uv_idle_stop(handle);
+ uv_want_endgame(handle);
return 0;
- case OIO_ASYNC:
+ case UV_ASYNC:
if (!handle->async_sent) {
- oio_want_endgame(handle);
+ uv_want_endgame(handle);
}
return 0;
@@ -655,12 +655,12 @@ static int oio_close_error(oio_handle_t* handle, oio_err_t e) {
}
-int oio_close(oio_handle_t* handle) {
- return oio_close_error(handle, oio_ok_);
+int uv_close(uv_handle_t* handle) {
+ return uv_close_error(handle, uv_ok_);
}
-struct sockaddr_in oio_ip4_addr(char* ip, int port) {
+struct sockaddr_in uv_ip4_addr(char* ip, int port) {
struct sockaddr_in addr;
addr.sin_family = AF_INET;
@@ -671,7 +671,7 @@ struct sockaddr_in oio_ip4_addr(char* ip, int port) {
}
-int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
+int uv_bind(uv_handle_t* handle, struct sockaddr* addr) {
int addrsize;
DWORD err;
@@ -680,7 +680,7 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
} else if (addr->sa_family == AF_INET6) {
addrsize = sizeof(struct sockaddr_in6);
} else {
- oio_set_sys_error(WSAEFAULT);
+ uv_set_sys_error(WSAEFAULT);
return -1;
}
@@ -688,40 +688,40 @@ int oio_bind(oio_handle_t* handle, struct sockaddr* addr) {
err = WSAGetLastError();
if (err == WSAEADDRINUSE) {
/* Some errors are not to be reported until connect() or listen() */
- handle->error = oio_new_sys_error(err);
- handle->flags |= OIO_HANDLE_BIND_ERROR;
+ handle->error = uv_new_sys_error(err);
+ handle->flags |= UV_HANDLE_BIND_ERROR;
} else {
- oio_set_sys_error(err);
+ uv_set_sys_error(err);
return -1;
}
}
- handle->flags |= OIO_HANDLE_BOUND;
+ handle->flags |= UV_HANDLE_BOUND;
return 0;
}
-static void oio_queue_accept(oio_handle_t* handle) {
- oio_req_t* req;
+static void uv_queue_accept(uv_handle_t* handle) {
+ uv_req_t* req;
BOOL success;
DWORD bytes;
SOCKET accept_socket;
- assert(handle->flags & OIO_HANDLE_LISTENING);
+ assert(handle->flags & UV_HANDLE_LISTENING);
assert(handle->accept_socket == INVALID_SOCKET);
accept_socket = socket(AF_INET, SOCK_STREAM, 0);
if (accept_socket == INVALID_SOCKET) {
- oio_close_error(handle, oio_new_sys_error(WSAGetLastError()));
+ uv_close_error(handle, uv_new_sys_error(WSAGetLastError()));
return;
}
- /* Prepare the oio_req and OVERLAPPED structures. */
+ /* Prepare the uv_req and OVERLAPPED structures. */
req = &handle->accept_req;
- assert(!(req->flags & OIO_REQ_PENDING));
- req->type = OIO_ACCEPT;
- req->flags |= OIO_REQ_PENDING;
+ assert(!(req->flags & UV_REQ_PENDING));
+ req->type = UV_ACCEPT;
+ req->flags |= UV_REQ_PENDING;
memset(&(req->overlapped), 0, sizeof(req->overlapped));
success = pAcceptEx(handle->socket,
@@ -734,35 +734,35 @@ static void oio_queue_accept(oio_handle_t* handle) {
&req->overlapped);
if (!success && WSAGetLastError() != ERROR_IO_PENDING) {
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
/* destroy the preallocated client handle */
closesocket(accept_socket);
/* destroy ourselves */
- oio_close_error(handle, oio_last_error_);
+ uv_close_error(handle, uv_last_error_);
return;
}
handle->accept_socket = accept_socket;
handle->reqs_pending++;
- req->flags |= OIO_REQ_PENDING;
+ req->flags |= UV_REQ_PENDING;
}
-static void oio_queue_read(oio_handle_t* handle) {
- oio_req_t *req;
- oio_buf buf;
+static void uv_queue_read(uv_handle_t* handle) {
+ uv_req_t *req;
+ uv_buf buf;
int result;
DWORD bytes, flags;
- assert(handle->flags & OIO_HANDLE_READING);
+ assert(handle->flags & UV_HANDLE_READING);
req = &handle->read_req;
- assert(!(req->flags & OIO_REQ_PENDING));
+ assert(!(req->flags & UV_REQ_PENDING));
memset(&req->overlapped, 0, sizeof(req->overlapped));
- req->type = OIO_READ;
+ req->type = UV_READ;
- buf.base = (char*) &oio_zero_;
+ buf.base = (char*) &uv_zero_;
buf.len = 0;
flags = 0;
@@ -774,125 +774,125 @@ static void oio_queue_read(oio_handle_t* handle) {
&req->overlapped,
NULL);
if (result != 0 && WSAGetLastError() != ERROR_IO_PENDING) {
- oio_set_sys_error(WSAGetLastError());
- oio_close_error(handle, oio_last_error_);
+ uv_set_sys_error(WSAGetLastError());
+ uv_close_error(handle, uv_last_error_);
return;
}
- req->flags |= OIO_REQ_PENDING;
+ req->flags |= UV_REQ_PENDING;
handle->reqs_pending++;
}
-int oio_listen(oio_handle_t* handle, int backlog, oio_accept_cb cb) {
+int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb) {
assert(backlog > 0);
- if (handle->flags & OIO_HANDLE_BIND_ERROR) {
- oio_last_error_ = handle->error;
+ if (handle->flags & UV_HANDLE_BIND_ERROR) {
+ uv_last_error_ = handle->error;
return -1;
}
- if (handle->flags & OIO_HANDLE_LISTENING ||
- handle->flags & OIO_HANDLE_READING) {
+ if (handle->flags & UV_HANDLE_LISTENING ||
+ handle->flags & UV_HANDLE_READING) {
/* Already listening. */
- oio_set_sys_error(WSAEALREADY);
+ uv_set_sys_error(WSAEALREADY);
return -1;
}
if (listen(handle->socket, backlog) == SOCKET_ERROR) {
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
return -1;
}
- handle->flags |= OIO_HANDLE_LISTENING;
+ handle->flags |= UV_HANDLE_LISTENING;
handle->accept_cb = cb;
- oio_req_init(&(handle->accept_req), handle, NULL);
- oio_queue_accept(handle);
+ uv_req_init(&(handle->accept_req), handle, NULL);
+ uv_queue_accept(handle);
return 0;
}
-int oio_accept(oio_handle_t* server, oio_handle_t* client,
- oio_close_cb close_cb, void* data) {
+int uv_accept(uv_handle_t* server, uv_handle_t* client,
+ uv_close_cb close_cb, void* data) {
int rv = 0;
if (server->accept_socket == INVALID_SOCKET) {
- oio_set_sys_error(WSAENOTCONN);
+ uv_set_sys_error(WSAENOTCONN);
return -1;
}
- if (oio_tcp_init_socket(client, close_cb, data, server->accept_socket) == -1) {
- oio_fatal_error(oio_last_error_.sys_errno_, "init");
+ if (uv_tcp_init_socket(client, close_cb, data, server->accept_socket) == -1) {
+ uv_fatal_error(uv_last_error_.sys_errno_, "init");
closesocket(server->accept_socket);
rv = -1;
}
- oio_tcp_init_connection(client);
+ uv_tcp_init_connection(client);
server->accept_socket = INVALID_SOCKET;
- if (!(server->flags & OIO_HANDLE_CLOSING)) {
- oio_queue_accept(server);
+ if (!(server->flags & UV_HANDLE_CLOSING)) {
+ uv_queue_accept(server);
}
return rv;
}
-int oio_read_start(oio_handle_t* handle, oio_read_cb cb) {
- if (!(handle->flags & OIO_HANDLE_CONNECTION)) {
- oio_set_sys_error(WSAEINVAL);
+int uv_read_start(uv_handle_t* handle, uv_read_cb cb) {
+ if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+ uv_set_sys_error(WSAEINVAL);
return -1;
}
- if (handle->flags & OIO_HANDLE_READING) {
- oio_set_sys_error(WSAEALREADY);
+ if (handle->flags & UV_HANDLE_READING) {
+ uv_set_sys_error(WSAEALREADY);
return -1;
}
- if (handle->flags & OIO_HANDLE_EOF) {
- oio_set_sys_error(WSAESHUTDOWN);
+ if (handle->flags & UV_HANDLE_EOF) {
+ uv_set_sys_error(WSAESHUTDOWN);
return -1;
}
- handle->flags |= OIO_HANDLE_READING;
+ handle->flags |= UV_HANDLE_READING;
handle->read_cb = cb;
/* If reading was stopped and then started again, there could still be a */
/* read request pending. */
- if (!(handle->read_req.flags & OIO_REQ_PENDING))
- oio_queue_read(handle);
+ if (!(handle->read_req.flags & UV_REQ_PENDING))
+ uv_queue_read(handle);
return 0;
}
-int oio_read_stop(oio_handle_t* handle) {
- handle->flags &= ~OIO_HANDLE_READING;
+int uv_read_stop(uv_handle_t* handle) {
+ handle->flags &= ~UV_HANDLE_READING;
return 0;
}
-int oio_connect(oio_req_t* req, struct sockaddr* addr) {
+int uv_connect(uv_req_t* req, struct sockaddr* addr) {
int addrsize;
BOOL success;
DWORD bytes;
- oio_handle_t* handle = req->handle;
+ uv_handle_t* handle = req->handle;
- assert(!(req->flags & OIO_REQ_PENDING));
+ assert(!(req->flags & UV_REQ_PENDING));
- if (handle->flags & OIO_HANDLE_BIND_ERROR) {
- oio_last_error_ = handle->error;
+ if (handle->flags & UV_HANDLE_BIND_ERROR) {
+ uv_last_error_ = handle->error;
return -1;
}
if (addr->sa_family == AF_INET) {
addrsize = sizeof(struct sockaddr_in);
- if (!(handle->flags & OIO_HANDLE_BOUND) &&
- oio_bind(handle, (struct sockaddr*)&oio_addr_ip4_any_) < 0)
+ if (!(handle->flags & UV_HANDLE_BOUND) &&
+ uv_bind(handle, (struct sockaddr*)&uv_addr_ip4_any_) < 0)
return -1;
} else if (addr->sa_family == AF_INET6) {
addrsize = sizeof(struct sockaddr_in6);
@@ -904,7 +904,7 @@ int oio_connect(oio_req_t* req, struct sockaddr* addr) {
}
memset(&req->overlapped, 0, sizeof(req->overlapped));
- req->type = OIO_CONNECT;
+ req->type = UV_CONNECT;
success = pConnectEx(handle->socket,
addr,
@@ -915,18 +915,18 @@ int oio_connect(oio_req_t* req, struct sockaddr* addr) {
&req->overlapped);
if (!success && WSAGetLastError() != ERROR_IO_PENDING) {
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
return -1;
}
- req->flags |= OIO_REQ_PENDING;
+ req->flags |= UV_REQ_PENDING;
handle->reqs_pending++;
return 0;
}
-static size_t oio_count_bufs(oio_buf bufs[], int count) {
+static size_t uv_count_bufs(uv_buf bufs[], int count) {
size_t bytes = 0;
int i;
@@ -938,25 +938,25 @@ static size_t oio_count_bufs(oio_buf bufs[], int count) {
}
-int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
+int uv_write(uv_req_t* req, uv_buf bufs[], int bufcnt) {
int result;
DWORD bytes, err;
- oio_handle_t* handle = req->handle;
+ uv_handle_t* handle = req->handle;
- assert(!(req->flags & OIO_REQ_PENDING));
+ assert(!(req->flags & UV_REQ_PENDING));
- if (!(req->handle->flags & OIO_HANDLE_CONNECTION)) {
- oio_set_sys_error(WSAEINVAL);
+ if (!(req->handle->flags & UV_HANDLE_CONNECTION)) {
+ uv_set_sys_error(WSAEINVAL);
return -1;
}
- if (req->handle->flags & OIO_HANDLE_SHUTTING) {
- oio_set_sys_error(WSAESHUTDOWN);
+ if (req->handle->flags & UV_HANDLE_SHUTTING) {
+ uv_set_sys_error(WSAESHUTDOWN);
return -1;
}
memset(&req->overlapped, 0, sizeof(req->overlapped));
- req->type = OIO_WRITE;
+ req->type = UV_WRITE;
result = WSASend(handle->socket,
(WSABUF*)bufs,
@@ -969,7 +969,7 @@ int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
err = WSAGetLastError();
if (err != WSA_IO_PENDING) {
/* Send failed due to an error */
- oio_set_sys_error(WSAGetLastError());
+ uv_set_sys_error(WSAGetLastError());
return -1;
}
}
@@ -979,11 +979,11 @@ int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
req->queued_bytes = 0;
} else {
/* Request queued by the kernel */
- req->queued_bytes = oio_count_bufs(bufs, bufcnt);
+ req->queued_bytes = uv_count_bufs(bufs, bufcnt);
handle->write_queue_size += req->queued_bytes;
}
- req->flags |= OIO_REQ_PENDING;
+ req->flags |= UV_REQ_PENDING;
handle->reqs_pending++;
handle->write_reqs_pending++;
@@ -991,70 +991,70 @@ int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt) {
}
-int oio_shutdown(oio_req_t* req) {
- oio_handle_t* handle = req->handle;
+int uv_shutdown(uv_req_t* req) {
+ uv_handle_t* handle = req->handle;
int status = 0;
- if (!(req->handle->flags & OIO_HANDLE_CONNECTION)) {
- oio_set_sys_error(WSAEINVAL);
+ if (!(req->handle->flags & UV_HANDLE_CONNECTION)) {
+ uv_set_sys_error(WSAEINVAL);
return -1;
}
- if (handle->flags & OIO_HANDLE_SHUTTING) {
- oio_set_sys_error(WSAESHUTDOWN);
+ if (handle->flags & UV_HANDLE_SHUTTING) {
+ uv_set_sys_error(WSAESHUTDOWN);
return -1;
}
- req->type = OIO_SHUTDOWN;
- req->flags |= OIO_REQ_PENDING;
+ req->type = UV_SHUTDOWN;
+ req->flags |= UV_REQ_PENDING;
- handle->flags |= OIO_HANDLE_SHUTTING;
+ handle->flags |= UV_HANDLE_SHUTTING;
handle->shutdown_req = req;
handle->reqs_pending++;
- oio_want_endgame(handle);
+ uv_want_endgame(handle);
return 0;
}
-static void oio_tcp_return_req(oio_handle_t* handle, oio_req_t* req) {
+static void uv_tcp_return_req(uv_handle_t* handle, uv_req_t* req) {
BOOL success;
DWORD bytes, flags, err;
- oio_buf buf;
+ uv_buf buf;
- assert(handle->type == OIO_TCP);
+ assert(handle->type == UV_TCP);
/* Mark the request non-pending */
- req->flags &= ~OIO_REQ_PENDING;
+ req->flags &= ~UV_REQ_PENDING;
switch (req->type) {
- case OIO_WRITE:
+ case UV_WRITE:
success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
handle->write_queue_size -= req->queued_bytes;
if (!success) {
- oio_set_sys_error(GetLastError());
- oio_close_error(handle, oio_last_error_);
+ uv_set_sys_error(GetLastError());
+ uv_close_error(handle, uv_last_error_);
}
if (req->cb) {
- ((oio_write_cb)req->cb)(req, success ? 0 : -1);
+ ((uv_write_cb)req->cb)(req, success ? 0 : -1);
}
handle->write_reqs_pending--;
if (success &&
handle->write_reqs_pending == 0 &&
- handle->flags & OIO_HANDLE_SHUTTING) {
- oio_want_endgame(handle);
+ handle->flags & UV_HANDLE_SHUTTING) {
+ uv_want_endgame(handle);
}
break;
- case OIO_READ:
+ case UV_READ:
success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
if (!success) {
- oio_set_sys_error(GetLastError());
- oio_close_error(handle, oio_last_error_);
+ uv_set_sys_error(GetLastError());
+ uv_close_error(handle, uv_last_error_);
}
- while (handle->flags & OIO_HANDLE_READING) {
- buf = oio_alloc_(handle, 65536);
+ while (handle->flags & UV_HANDLE_READING) {
+ buf = uv_alloc_(handle, 65536);
assert(buf.len > 0);
flags = 0;
if (WSARecv(handle->socket,
@@ -1066,44 +1066,44 @@ static void oio_tcp_return_req(oio_handle_t* handle, oio_req_t* req) {
NULL) != SOCKET_ERROR) {
if (bytes > 0) {
/* Successful read */
- ((oio_read_cb)handle->read_cb)(handle, bytes, buf);
+ ((uv_read_cb)handle->read_cb)(handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
}
} else {
/* Connection closed */
- handle->flags &= ~OIO_HANDLE_READING;
- handle->flags |= OIO_HANDLE_EOF;
- oio_last_error_.code = OIO_EOF;
- oio_last_error_.sys_errno_ = ERROR_SUCCESS;
- ((oio_read_cb)handle->read_cb)(handle, -1, buf);
- oio_want_endgame(handle);
+ handle->flags &= ~UV_HANDLE_READING;
+ handle->flags |= UV_HANDLE_EOF;
+ uv_last_error_.code = UV_EOF;
+ uv_last_error_.sys_errno_ = ERROR_SUCCESS;
+ ((uv_read_cb)handle->read_cb)(handle, -1, buf);
+ uv_want_endgame(handle);
break;
}
} else {
err = WSAGetLastError();
if (err == WSAEWOULDBLOCK) {
/* 0-byte read */
- oio_set_sys_error(WSAEWOULDBLOCK);
- ((oio_read_cb)handle->read_cb)(handle, 0, buf);
+ uv_set_sys_error(WSAEWOULDBLOCK);
+ ((uv_read_cb)handle->read_cb)(handle, 0, buf);
} else {
/* Ouch! serious error. */
- oio_set_sys_error(err);
- oio_close_error(handle, oio_last_error_);
+ uv_set_sys_error(err);
+ uv_close_error(handle, uv_last_error_);
}
break;
}
}
/* Post another 0-read if still reading and not closing */
- if (!(handle->flags & OIO_HANDLE_CLOSING) &&
- !(handle->flags & OIO_HANDLE_EOF) &&
- handle->flags & OIO_HANDLE_READING) {
- oio_queue_read(handle);
+ if (!(handle->flags & UV_HANDLE_CLOSING) &&
+ !(handle->flags & UV_HANDLE_EOF) &&
+ handle->flags & UV_HANDLE_READING) {
+ uv_queue_read(handle);
}
break;
- case OIO_ACCEPT:
+ case UV_ACCEPT:
assert(handle->accept_socket != INVALID_SOCKET);
success = GetOverlappedResult(handle->handle, &req->overlapped, &bytes, FALSE);
@@ -1115,18 +1115,18 @@ static void oio_tcp_return_req(oio_handle_t* handle, oio_req_t* req) {
if (success) {
if (handle->accept_cb) {
- ((oio_accept_cb)handle->accept_cb)(handle);
+ ((uv_accept_cb)handle->accept_cb)(handle);
}
} else {
/* Erroneous accept is ignored if the listen socket is still healthy. */
closesocket(handle->accept_socket);
- if (!(handle->flags & OIO_HANDLE_CLOSING)) {
- oio_queue_accept(handle);
+ if (!(handle->flags & UV_HANDLE_CLOSING)) {
+ uv_queue_accept(handle);
}
}
break;
- case OIO_CONNECT:
+ case UV_CONNECT:
if (req->cb) {
success = GetOverlappedResult(handle->handle,
&req->overlapped,
@@ -1138,15 +1138,15 @@ static void oio_tcp_return_req(oio_handle_t* handle, oio_req_t* req) {
SO_UPDATE_CONNECT_CONTEXT,
NULL,
0) == 0) {
- oio_tcp_init_connection(handle);
- ((oio_connect_cb)req->cb)(req, 0);
+ uv_tcp_init_connection(handle);
+ ((uv_connect_cb)req->cb)(req, 0);
} else {
- oio_set_sys_error(WSAGetLastError());
- ((oio_connect_cb)req->cb)(req, -1);
+ uv_set_sys_error(WSAGetLastError());
+ ((uv_connect_cb)req->cb)(req, -1);
}
} else {
- oio_set_sys_error(WSAGetLastError());
- ((oio_connect_cb)req->cb)(req, -1);
+ uv_set_sys_error(WSAGetLastError());
+ ((uv_connect_cb)req->cb)(req, -1);
}
}
break;
@@ -1160,14 +1160,14 @@ static void oio_tcp_return_req(oio_handle_t* handle, oio_req_t* req) {
/* Queue the handle's close callback if it is closing and there are no */
/* more pending requests. */
- if (handle->flags & OIO_HANDLE_CLOSING &&
+ if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
- oio_want_endgame(handle);
+ uv_want_endgame(handle);
}
}
-static int oio_timer_compare(oio_req_t* a, oio_req_t* b) {
+static int uv_timer_compare(uv_req_t* a, uv_req_t* b) {
if (a->due < b->due)
return -1;
if (a->due > b->due)
@@ -1180,58 +1180,58 @@ static int oio_timer_compare(oio_req_t* a, oio_req_t* b) {
}
-RB_GENERATE_STATIC(oio_timer_s, oio_req_s, tree_entry, oio_timer_compare);
+RB_GENERATE_STATIC(uv_timer_s, uv_req_s, tree_entry, uv_timer_compare);
-int oio_timeout(oio_req_t* req, int64_t timeout) {
- assert(!(req->flags & OIO_REQ_PENDING));
+int uv_timeout(uv_req_t* req, int64_t timeout) {
+ assert(!(req->flags & UV_REQ_PENDING));
- req->type = OIO_TIMEOUT;
+ req->type = UV_TIMEOUT;
- req->due = oio_now_ + timeout;
- if (RB_INSERT(oio_timer_s, &oio_timers_, req) != NULL) {
- oio_set_sys_error(ERROR_INVALID_DATA);
+ req->due = uv_now_ + timeout;
+ if (RB_INSERT(uv_timer_s, &uv_timers_, req) != NULL) {
+ uv_set_sys_error(ERROR_INVALID_DATA);
return -1;
}
- oio_refs_++;
- req->flags |= OIO_REQ_PENDING;
+ uv_refs_++;
+ req->flags |= UV_REQ_PENDING;
return 0;
}
-void oio_update_time() {
+void uv_update_time() {
LARGE_INTEGER counter;
if (!QueryPerformanceCounter(&counter))
- oio_fatal_error(GetLastError(), "QueryPerformanceCounter");
+ uv_fatal_error(GetLastError(), "QueryPerformanceCounter");
- oio_now_ = counter.QuadPart / oio_ticks_per_msec_;
+ uv_now_ = counter.QuadPart / uv_ticks_per_msec_;
}
-int64_t oio_now() {
- return oio_now_;
+int64_t uv_now() {
+ return uv_now_;
}
-int oio_loop_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
+int uv_loop_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
handle->close_cb = (void*) close_cb;
handle->data = data;
handle->flags = 0;
- handle->error = oio_ok_;
+ handle->error = uv_ok_;
- oio_refs_++;
+ uv_refs_++;
return 0;
}
-static int oio_loop_start(oio_handle_t* handle, oio_loop_cb loop_cb,
- oio_handle_t** list) {
- oio_handle_t* old_head;
+static int uv_loop_start(uv_handle_t* handle, uv_loop_cb loop_cb,
+ uv_handle_t** list) {
+ uv_handle_t* old_head;
- if (handle->flags & OIO_HANDLE_ACTIVE)
+ if (handle->flags & UV_HANDLE_ACTIVE)
return 0;
old_head = *list;
@@ -1246,14 +1246,14 @@ static int oio_loop_start(oio_handle_t* handle, oio_loop_cb loop_cb,
*list = handle;
handle->loop_cb = loop_cb;
- handle->flags |= OIO_HANDLE_ACTIVE;
+ handle->flags |= UV_HANDLE_ACTIVE;
return 0;
}
-static int oio_loop_stop(oio_handle_t* handle, oio_handle_t** list) {
- if (!(handle->flags & OIO_HANDLE_ACTIVE))
+static int uv_loop_stop(uv_handle_t* handle, uv_handle_t** list) {
+ if (!(handle->flags & UV_HANDLE_ACTIVE))
return 0;
/* Update loop head if needed */
@@ -1262,8 +1262,8 @@ static int oio_loop_stop(oio_handle_t* handle, oio_handle_t** list) {
}
/* Update the iterator-next pointer if needed */
- if (oio_next_loop_handle_ == handle) {
- oio_next_loop_handle_ = handle->loop_next;
+ if (uv_next_loop_handle_ == handle) {
+ uv_next_loop_handle_ = handle->loop_next;
}
if (handle->loop_prev) {
@@ -1273,117 +1273,117 @@ static int oio_loop_stop(oio_handle_t* handle, oio_handle_t** list) {
handle->loop_next->loop_prev = handle->loop_prev;
}
- handle->flags &= ~OIO_HANDLE_ACTIVE;
+ handle->flags &= ~UV_HANDLE_ACTIVE;
return 0;
}
-static void oio_loop_invoke(oio_handle_t* list) {
- oio_handle_t *handle;
+static void uv_loop_invoke(uv_handle_t* list) {
+ uv_handle_t *handle;
- oio_next_loop_handle_ = list;
+ uv_next_loop_handle_ = list;
- while (oio_next_loop_handle_ != NULL) {
- handle = oio_next_loop_handle_;
- oio_next_loop_handle_ = handle->loop_next;
+ while (uv_next_loop_handle_ != NULL) {
+ handle = uv_next_loop_handle_;
+ uv_next_loop_handle_ = handle->loop_next;
- ((oio_loop_cb)handle->loop_cb)(handle, 0);
+ ((uv_loop_cb)handle->loop_cb)(handle, 0);
}
}
-int oio_prepare_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- handle->type = OIO_PREPARE;
- return oio_loop_init(handle, close_cb, data);
+int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ handle->type = UV_PREPARE;
+ return uv_loop_init(handle, close_cb, data);
}
-int oio_check_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- handle->type = OIO_CHECK;
- return oio_loop_init(handle, close_cb, data);
+int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ handle->type = UV_CHECK;
+ return uv_loop_init(handle, close_cb, data);
}
-int oio_idle_init(oio_handle_t* handle, oio_close_cb close_cb, void* data) {
- handle->type = OIO_IDLE;
- return oio_loop_init(handle, close_cb, data);
+int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
+ handle->type = UV_IDLE;
+ return uv_loop_init(handle, close_cb, data);
}
-int oio_prepare_start(oio_handle_t* handle, oio_loop_cb loop_cb) {
- assert(handle->type == OIO_PREPARE);
- return oio_loop_start(handle, loop_cb, &oio_prepare_handles_);
+int uv_prepare_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+ assert(handle->type == UV_PREPARE);
+ return uv_loop_start(handle, loop_cb, &uv_prepare_handles_);
}
-int oio_check_start(oio_handle_t* handle, oio_loop_cb loop_cb) {
- assert(handle->type == OIO_CHECK);
- return oio_loop_start(handle, loop_cb, &oio_check_handles_);
+int uv_check_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+ assert(handle->type == UV_CHECK);
+ return uv_loop_start(handle, loop_cb, &uv_check_handles_);
}
-int oio_idle_start(oio_handle_t* handle, oio_loop_cb loop_cb) {
- assert(handle->type == OIO_IDLE);
- return oio_loop_start(handle, loop_cb, &oio_idle_handles_);
+int uv_idle_start(uv_handle_t* handle, uv_loop_cb loop_cb) {
+ assert(handle->type == UV_IDLE);
+ return uv_loop_start(handle, loop_cb, &uv_idle_handles_);
}
-int oio_prepare_stop(oio_handle_t* handle) {
- assert(handle->type == OIO_PREPARE);
- return oio_loop_stop(handle, &oio_prepare_handles_);
+int uv_prepare_stop(uv_handle_t* handle) {
+ assert(handle->type == UV_PREPARE);
+ return uv_loop_stop(handle, &uv_prepare_handles_);
}
-int oio_check_stop(oio_handle_t* handle) {
- assert(handle->type == OIO_CHECK);
- return oio_loop_stop(handle, &oio_check_handles_);
+int uv_check_stop(uv_handle_t* handle) {
+ assert(handle->type == UV_CHECK);
+ return uv_loop_stop(handle, &uv_check_handles_);
}
-int oio_idle_stop(oio_handle_t* handle) {
- assert(handle->type == OIO_IDLE);
- return oio_loop_stop(handle, &oio_idle_handles_);
+int uv_idle_stop(uv_handle_t* handle) {
+ assert(handle->type == UV_IDLE);
+ return uv_loop_stop(handle, &uv_idle_handles_);
}
-int oio_async_init(oio_handle_t* handle, oio_async_cb async_cb,
- oio_close_cb close_cb, void* data) {
- oio_req_t* req;
+int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
+ uv_close_cb close_cb, void* data) {
+ uv_req_t* req;
- handle->type = OIO_ASYNC;
+ handle->type = UV_ASYNC;
handle->close_cb = (void*) close_cb;
handle->data = data;
handle->flags = 0;
handle->async_sent = 0;
- handle->error = oio_ok_;
+ handle->error = uv_ok_;
req = &handle->async_req;
- oio_req_init(req, handle, async_cb);
- req->type = OIO_WAKEUP;
+ uv_req_init(req, handle, async_cb);
+ req->type = UV_WAKEUP;
- oio_refs_++;
+ uv_refs_++;
return 0;
}
-int oio_async_send(oio_handle_t* handle) {
- if (handle->type != OIO_ASYNC) {
+int uv_async_send(uv_handle_t* handle) {
+ if (handle->type != UV_ASYNC) {
/* Can't set errno because that's not thread-safe. */
return -1;
}
- /* The user should make sure never to call oio_async_send to a closing */
+ /* The user should make sure never to call uv_async_send to a closing */
/* or closed handle. */
- assert(!(handle->flags & OIO_HANDLE_CLOSING));
+ assert(!(handle->flags & UV_HANDLE_CLOSING));
- if (!oio_atomic_exchange_set(&handle->async_sent)) {
- if (!PostQueuedCompletionStatus(oio_iocp_,
+ if (!uv_atomic_exchange_set(&handle->async_sent)) {
+ if (!PostQueuedCompletionStatus(uv_iocp_,
0,
0,
&handle->async_req.overlapped)) {
- oio_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
}
}
@@ -1391,44 +1391,44 @@ int oio_async_send(oio_handle_t* handle) {
}
-static void oio_async_return_req(oio_handle_t* handle, oio_req_t* req) {
- assert(handle->type == OIO_ASYNC);
- assert(req->type == OIO_WAKEUP);
+static void uv_async_return_req(uv_handle_t* handle, uv_req_t* req) {
+ assert(handle->type == UV_ASYNC);
+ assert(req->type == UV_WAKEUP);
handle->async_sent = 0;
if (req->cb) {
- ((oio_async_cb)req->cb)(handle, 0);
+ ((uv_async_cb)req->cb)(handle, 0);
}
- if (handle->flags & OIO_HANDLE_CLOSING) {
- oio_want_endgame(handle);
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ uv_want_endgame(handle);
}
}
-static void oio_poll() {
+static void uv_poll() {
BOOL success;
DWORD bytes;
ULONG_PTR key;
OVERLAPPED* overlapped;
- oio_req_t* req;
- oio_handle_t* handle;
+ uv_req_t* req;
+ uv_handle_t* handle;
DWORD timeout;
int64_t delta;
/* Call all pending close callbacks. */
/* TODO: ugly, fixme. */
- oio_call_endgames();
- if (oio_refs_ == 0)
+ uv_call_endgames();
+ if (uv_refs_ == 0)
return;
- oio_loop_invoke(oio_prepare_handles_);
+ uv_loop_invoke(uv_prepare_handles_);
- oio_update_time();
+ uv_update_time();
/* Check if there are any running timers */
- req = RB_MIN(oio_timer_s, &oio_timers_);
+ req = RB_MIN(uv_timer_s, &uv_timers_);
if (req) {
- delta = req->due - oio_now_;
+ delta = req->due - uv_now_;
if (delta >= UINT_MAX) {
/* Can't have a timeout greater than UINT_MAX, and a timeout value of */
/* UINT_MAX means infinite, so that's no good either. */
@@ -1444,39 +1444,39 @@ static void oio_poll() {
timeout = INFINITE;
}
- success = GetQueuedCompletionStatus(oio_iocp_,
+ success = GetQueuedCompletionStatus(uv_iocp_,
&bytes,
&key,
&overlapped,
timeout);
- oio_update_time();
+ uv_update_time();
/* Call check callbacks */
- oio_loop_invoke(oio_check_handles_);
+ uv_loop_invoke(uv_check_handles_);
/* Call timer callbacks */
- for (req = RB_MIN(oio_timer_s, &oio_timers_);
- req != NULL && req->due <= oio_now_;
- req = RB_MIN(oio_timer_s, &oio_timers_)) {
- RB_REMOVE(oio_timer_s, &oio_timers_, req);
- req->flags &= ~OIO_REQ_PENDING;
- oio_refs_--;
- ((oio_timer_cb)req->cb)(req, req->due - oio_now_, 0);
+ for (req = RB_MIN(uv_timer_s, &uv_timers_);
+ req != NULL && req->due <= uv_now_;
+ req = RB_MIN(uv_timer_s, &uv_timers_)) {
+ RB_REMOVE(uv_timer_s, &uv_timers_, req);
+ req->flags &= ~UV_REQ_PENDING;
+ uv_refs_--;
+ ((uv_timer_cb)req->cb)(req, req->due - uv_now_, 0);
}
/* Only if an iocp packet was dequeued... */
if (overlapped) {
- req = oio_overlapped_to_req(overlapped);
+ req = uv_overlapped_to_req(overlapped);
handle = req->handle;
switch (handle->type) {
- case OIO_TCP:
- oio_tcp_return_req(handle, req);
+ case UV_TCP:
+ uv_tcp_return_req(handle, req);
break;
- case OIO_ASYNC:
- oio_async_return_req(handle, req);
+ case UV_ASYNC:
+ uv_async_return_req(handle, req);
break;
default:
@@ -1485,27 +1485,27 @@ static void oio_poll() {
} /* if (overlapped) */
/* Call idle callbacks */
- while (oio_idle_handles_) {
- oio_call_endgames();
- oio_loop_invoke(oio_idle_handles_);
+ while (uv_idle_handles_) {
+ uv_call_endgames();
+ uv_loop_invoke(uv_idle_handles_);
}
}
-int oio_run() {
- while (oio_refs_ > 0) {
- oio_poll();
+int uv_run() {
+ while (uv_refs_ > 0) {
+ uv_poll();
}
- assert(oio_refs_ == 0);
+ assert(uv_refs_ == 0);
return 0;
}
-void oio_ref() {
- oio_refs_++;
+void uv_ref() {
+ uv_refs_++;
}
-void oio_unref() {
- oio_refs_--;
+void uv_unref() {
+ uv_refs_--;
}
diff --git a/oio-win.h b/uv-win.h
similarity index 72%
rename from oio-win.h
rename to uv-win.h
index b1a4ea6d..2f16197e 100644
--- a/oio-win.h
+++ b/uv-win.h
@@ -33,15 +33,15 @@
/**
- * It should be possible to cast oio_buf[] to WSABUF[]
+ * It should be possible to cast uv_buf[] to WSABUF[]
* see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
*/
-typedef struct oio_buf {
+typedef struct uv_buf {
ULONG len;
char* base;
-} oio_buf;
+} uv_buf;
-#define oio_req_private_fields \
+#define uv_req_private_fields \
union { \
/* Used by I/O operations */ \
struct { \
@@ -50,51 +50,51 @@ typedef struct oio_buf {
}; \
/* Used by timers */ \
struct { \
- RB_ENTRY(oio_req_s) tree_entry; \
+ RB_ENTRY(uv_req_s) tree_entry; \
int64_t due; \
}; \
}; \
int flags;
-#define oio_tcp_connection_fields \
+#define uv_tcp_connection_fields \
void* read_cb; \
- struct oio_req_s read_req; \
+ struct uv_req_s read_req; \
unsigned int write_reqs_pending; \
- oio_req_t* shutdown_req;
+ uv_req_t* shutdown_req;
-#define oio_tcp_server_fields \
+#define uv_tcp_server_fields \
void *accept_cb; \
SOCKET accept_socket; \
- struct oio_req_s accept_req; \
+ struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
-#define oio_tcp_fields \
+#define uv_tcp_fields \
unsigned int reqs_pending; \
union { \
SOCKET socket; \
HANDLE handle; \
}; \
union { \
- struct { oio_tcp_connection_fields }; \
- struct { oio_tcp_server_fields }; \
+ struct { uv_tcp_connection_fields }; \
+ struct { uv_tcp_server_fields }; \
};
-#define oio_loop_fields \
- oio_handle_t* loop_prev; \
- oio_handle_t* loop_next; \
+#define uv_loop_fields \
+ uv_handle_t* loop_prev; \
+ uv_handle_t* loop_next; \
void* loop_cb;
-#define oio_async_fields \
- struct oio_req_s async_req; \
+#define uv_async_fields \
+ struct uv_req_s async_req; \
/* char to avoid alignment issues */ \
char volatile async_sent;
-#define oio_handle_private_fields \
- oio_handle_t* endgame_next; \
+#define uv_handle_private_fields \
+ uv_handle_t* endgame_next; \
unsigned int flags; \
- oio_err_t error; \
+ uv_err_t error; \
union { \
- struct { oio_tcp_fields }; \
- struct { oio_loop_fields }; \
- struct { oio_async_fields }; \
+ struct { uv_tcp_fields }; \
+ struct { uv_loop_fields }; \
+ struct { uv_async_fields }; \
};
diff --git a/uv.h b/uv.h
new file mode 100644
index 00000000..95dde525
--- /dev/null
+++ b/uv.h
@@ -0,0 +1,271 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_H
+#define UV_H
+
+#define UV_VERSION_MAJOR 0
+#define UV_VERSION_MINOR 1
+
+#include <stdint.h> /* int64_t */
+#include <stddef.h> /* size_t */
+
+typedef struct uv_err_s uv_err_t;
+typedef struct uv_handle_s uv_handle_t;
+typedef struct uv_req_s uv_req_t;
+
+
+#if defined(__unix__) || defined(__POSIX__) || defined(__APPLE__)
+# include "uv-unix.h"
+#else
+# include "uv-win.h"
+#endif
+
+
+/* The status parameter is 0 if the request completed successfully,
+ * and should be -1 if the request was cancelled or failed.
+ * For uv_close_cb, -1 means that the handle was closed due to an error.
+ * Error details can be obtained by calling uv_last_error().
+ *
+ * In the case of uv_read_cb the uv_buf returned should be freed by the
+ * user.
+ */
+typedef uv_buf (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size);
+typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf buf);
+typedef void (*uv_write_cb)(uv_req_t* req, int status);
+typedef void (*uv_connect_cb)(uv_req_t* req, int status);
+typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
+typedef void (*uv_accept_cb)(uv_handle_t* handle);
+typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
+typedef void (*uv_timer_cb)(uv_req_t* req, int64_t skew, int status);
+/* TODO: do loop_cb and async_cb really need a status argument? */
+typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
+typedef void (*uv_async_cb)(uv_handle_t* handle, int status);
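/* Editor's usage sketch (not part of this patch): a minimal allocator that
 * satisfies the uv_alloc_cb contract above. It is registered once through
 * uv_init(); the uv_buf it returns is later handed to the uv_read_cb, where
 * the user frees it. Assumes <stdlib.h>; the name demo_alloc is hypothetical.
 */
static uv_buf demo_alloc(uv_handle_t* handle, size_t suggested_size) {
  uv_buf buf;
  buf.base = (char*) malloc(suggested_size);  /* freed by the read callback */
  buf.len = suggested_size;
  return buf;
}
/* In main(), before creating any handles:  uv_init(demo_alloc);  */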
+
+
+/* Expand this list if necessary. */
+typedef enum {
+ UV_UNKNOWN = -1,
+ UV_OK = 0,
+ UV_EOF,
+ UV_EACCESS,
+ UV_EAGAIN,
+ UV_EADDRINUSE,
+ UV_EADDRNOTAVAIL,
+ UV_EAFNOSUPPORT,
+ UV_EALREADY,
+ UV_EBADF,
+ UV_EBUSY,
+ UV_ECONNABORTED,
+ UV_ECONNREFUSED,
+ UV_ECONNRESET,
+ UV_EDESTADDRREQ,
+ UV_EFAULT,
+ UV_EHOSTUNREACH,
+ UV_EINTR,
+ UV_EINVAL,
+ UV_EISCONN,
+ UV_EMFILE,
+ UV_ENETDOWN,
+ UV_ENETUNREACH,
+ UV_ENFILE,
+ UV_ENOBUFS,
+ UV_ENOMEM,
+ UV_ENONET,
+ UV_ENOPROTOOPT,
+ UV_ENOTCONN,
+ UV_ENOTSOCK,
+ UV_ENOTSUP,
+ UV_EPROTO,
+ UV_EPROTONOSUPPORT,
+ UV_EPROTOTYPE,
+ UV_ETIMEDOUT
+} uv_err_code;
+
+typedef enum {
+ UV_UNKNOWN_HANDLE = 0,
+ UV_TCP,
+ UV_NAMED_PIPE,
+ UV_TTY,
+ UV_FILE,
+ UV_PREPARE,
+ UV_CHECK,
+ UV_IDLE,
+ UV_ASYNC
+} uv_handle_type;
+
+typedef enum {
+ UV_UNKNOWN_REQ = 0,
+ UV_CONNECT,
+ UV_ACCEPT,
+ UV_READ,
+ UV_WRITE,
+ UV_SHUTDOWN,
+ UV_TIMEOUT,
+ UV_WAKEUP
+} uv_req_type;
+
+
+struct uv_err_s {
+ /* read-only */
+ uv_err_code code;
+ /* private */
+ int sys_errno_;
+};
+
+
+struct uv_req_s {
+ /* read-only */
+ uv_req_type type;
+ /* public */
+ uv_handle_t* handle;
+ void* cb;
+ void* data;
+ /* private */
+ uv_req_private_fields
+};
+
+
+struct uv_handle_s {
+ /* read-only */
+ uv_handle_type type;
+ /* public */
+ uv_close_cb close_cb;
+ void* data;
+ /* number of bytes queued for writing */
+ size_t write_queue_size;
+ /* private */
+ uv_handle_private_fields
+};
+
+
+/* Most functions return an int: 0 on success and -1 on failure.
+ * On error the user should then call uv_last_error() to determine
+ * the error code.
+ */
+uv_err_t uv_last_error();
+char* uv_strerror(uv_err_t err);
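/* Editor's usage sketch (not part of this patch): the 0/-1 convention above
 * combined with uv_last_error()/uv_strerror(). Assumes <stdio.h>; the helper
 * name report_if_failed is hypothetical.
 */
static void report_if_failed(int rv, const char* what) {
  if (rv == -1) {
    uv_err_t err = uv_last_error();  /* details of the most recent failure */
    fprintf(stderr, "%s: %s (uv error %d)\n",
            what, uv_strerror(err), (int) err.code);
  }
}
/* Example:  report_if_failed(uv_bind(&handle, (struct sockaddr*) &addr), "bind");  */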
+
+
+void uv_init(uv_alloc_cb alloc);
+int uv_run();
+
+/* Manually modify the event loop's reference count. Useful if the user wants
+ * to have a handle or timeout that doesn't keep the loop alive.
+ */
+void uv_ref();
+void uv_unref();
+
+void uv_update_time();
+int64_t uv_now();
+
+void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
+
+/*
+ * TODO:
+ * - uv_(pipe|pipe_tty)_handle_init
+ * - uv_bind_pipe(char* name)
+ * - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb)
+ * - A way to list cancelled uv_reqs after before/on uv_close_cb
+ */
+
+/* TCP socket methods.
+ * Handle and callback must be set by calling uv_req_init.
+ */
+int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
+int uv_bind(uv_handle_t* handle, struct sockaddr* addr);
+
+int uv_connect(uv_req_t* req, struct sockaddr* addr);
+int uv_shutdown(uv_req_t* req);
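/* Editor's usage sketch (not part of this patch): a minimal TCP client built
 * from the calls above. uv_req_init() ties each request to its handle and
 * callback before uv_connect()/uv_write() is issued; uv_ip4_addr() is declared
 * in the Utility section at the end of this header. Assumes only uv.h; all
 * names (start_client, on_connect, ...) are hypothetical.
 */
static uv_handle_t client_handle;
static uv_req_t connect_req;
static uv_req_t write_req;

static void on_client_closed(uv_handle_t* handle, int status) {
  /* the handle is fully closed here; status is -1 if it closed on error */
}

static void after_write(uv_req_t* req, int status) {
  if (status == -1) {
    uv_close(req->handle);
  }
}

static void on_connect(uv_req_t* req, int status) {
  uv_buf buf;
  if (status == -1) {
    uv_close(req->handle);
    return;
  }
  /* connected: queue a write on the same handle */
  buf.base = (char*) "hello\n";
  buf.len = 6;
  uv_req_init(&write_req, req->handle, (void*) after_write);
  uv_write(&write_req, &buf, 1);
}

static void start_client(void) {
  struct sockaddr_in addr = uv_ip4_addr((char*) "127.0.0.1", 8000);
  uv_tcp_init(&client_handle, on_client_closed, NULL);
  uv_req_init(&connect_req, &client_handle, (void*) on_connect);
  uv_connect(&connect_req, (struct sockaddr*) &addr);
}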
+
+/* TCP server methods. */
+int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb);
+
+/* Call this after accept_cb. client does not need to be initialized. */
+int uv_accept(uv_handle_t* server, uv_handle_t* client,
+ uv_close_cb close_cb, void* data);
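/* Editor's usage sketch (not part of this patch): a minimal TCP server. The
 * accept callback allocates storage for the peer handle, hands it to
 * uv_accept() as described above, and starts reading; on_data is the read
 * callback sketched after uv_read_start() below. Assumes <stdlib.h>; all
 * names are hypothetical.
 */
static void on_data(uv_handle_t* handle, int nread, uv_buf buf);  /* see below */

static uv_handle_t server_handle;

static void on_peer_close(uv_handle_t* handle, int status) {
  free(handle);  /* storage came from malloc() in on_incoming below */
}

static void on_incoming(uv_handle_t* server) {
  uv_handle_t* peer = (uv_handle_t*) malloc(sizeof(uv_handle_t));
  if (uv_accept(server, peer, on_peer_close, NULL) == 0) {
    uv_read_start(peer, on_data);
  } else {
    free(peer);
  }
}

static void start_server(void) {
  struct sockaddr_in addr = uv_ip4_addr((char*) "0.0.0.0", 8000);
  uv_tcp_init(&server_handle, NULL, NULL);
  uv_bind(&server_handle, (struct sockaddr*) &addr);
  uv_listen(&server_handle, 128, on_incoming);
}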
+
+
+/* Read data from an incoming stream. The callback will be made
+ * several times until there is no more data to read or uv_read_stop is
+ * called. When we've reached EOF nread will be set to -1 and the error is
+ * set to UV_EOF. When nread == -1 the buf parameter might not point to a
+ * valid buffer; in that case buf.len and buf.base are both set to 0.
+ * Note that nread might also be 0, which does *not* indicate an error or
+ * eof; it happens when libuv requested a buffer through the alloc callback
+ * but then decided that it didn't need that buffer.
+ */
+int uv_read_start(uv_handle_t* handle, uv_read_cb cb);
+int uv_read_stop(uv_handle_t* handle);
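/* Editor's usage sketch (not part of this patch): a read callback that covers
 * the three nread cases described above and frees the buffer supplied by the
 * uv_alloc_cb (see the allocator sketch near the top of this header). Assumes
 * <stdlib.h>; the name on_data is hypothetical.
 */
static void on_data(uv_handle_t* handle, int nread, uv_buf buf) {
  if (nread > 0) {
    /* nread bytes are available in buf.base[0 .. nread-1] */
  } else if (nread == 0) {
    /* the requested buffer was not needed; no data, no EOF, no error */
  } else {
    /* nread == -1: EOF (uv_last_error().code == UV_EOF) or a read error */
    uv_close(handle);
  }
  if (buf.base != NULL) {
    free(buf.base);  /* the buffer is owned by the user once the cb is called */
  }
}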
+
+int uv_write(uv_req_t* req, uv_buf bufs[], int bufcnt);
+
+/* Timer methods */
+int uv_timeout(uv_req_t* req, int64_t timeout);
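/* Editor's usage sketch (not part of this patch): a one-shot timeout. The
 * request is not tied to a handle, so NULL is passed to uv_req_init() (an
 * assumption based on the current Windows implementation); the timeout is in
 * milliseconds, matching uv_now()/uv_update_time(). Names are hypothetical.
 */
static uv_req_t timeout_req;

static void on_timeout(uv_req_t* req, int64_t skew, int status) {
  /* called once; skew reports how far from the due time the callback ran. */
  /* re-arm by calling uv_timeout(req, ...) again for a repeating timer. */
}

static void start_timeout(void) {
  uv_req_init(&timeout_req, NULL, (void*) on_timeout);
  uv_timeout(&timeout_req, 1000);  /* fire in about one second */
}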
+
+/* libev wrapper. Every active prepare handle gets its callback called
+ * exactly once per loop iteration, just before the system blocks to wait
+ * for completed i/o.
+ */
+int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
+int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb);
+int uv_prepare_stop(uv_handle_t* handle);
+
+/* libev wrapper. Every active check handle gets its callback called exactly
+ * once per loop iteration, just after the system returns from blocking.
+ */
+int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
+int uv_check_start(uv_handle_t* handle, uv_loop_cb cb);
+int uv_check_stop(uv_handle_t* handle);
+
+/* libev wrapper. Every active idle handle gets its callback called
+ * repeatedly until it is stopped. This happens after all other types of
+ * callbacks are processed.
+ * When there are multiple "idle" handles active, their callbacks are called
+ * in turn.
+ */
+int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
+int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb);
+int uv_idle_stop(uv_handle_t* handle);
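/* Editor's usage sketch (not part of this patch): an idle handle that runs a
 * bounded number of times and then stops and closes itself; prepare and check
 * handles follow the same init/start/stop pattern. Names are hypothetical.
 */
static uv_handle_t idle_handle;
static int idle_runs = 0;

static void on_idle(uv_handle_t* handle, int status) {
  if (++idle_runs >= 1000) {
    uv_idle_stop(handle);
    uv_close(handle);  /* close_cb (if any) fires on a later loop iteration */
  }
}

static void start_idle(void) {
  uv_idle_init(&idle_handle, NULL, NULL);
  uv_idle_start(&idle_handle, on_idle);
}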
+
+/* libev wrapper. uv_async_send wakes up the event loop and calls the async
+ * handle's callback. There is no guarantee that every uv_async_send call
+ * leads to exactly one invocation of the callback; the only guarantee is
+ * that the callback function is called at least once after the call to
+ * async_send. Unlike everything else, uv_async_send can be called from
+ * another thread.
+ *
+ * QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my
+ * side.
+ */
+int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
+ uv_close_cb close_cb, void* data);
+int uv_async_send(uv_handle_t* handle);
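/* Editor's usage sketch (not part of this patch): waking the loop from another
 * thread. Only uv_async_send() may be called off the loop thread; several
 * sends may coalesce into a single callback invocation, as noted above.
 * Names are hypothetical.
 */
static uv_handle_t async_handle;

static void on_wakeup(uv_handle_t* handle, int status) {
  /* runs on the event loop thread after one or more uv_async_send() calls */
}

/* On the loop thread, during setup: */
static void setup_wakeup(void) {
  uv_async_init(&async_handle, on_wakeup, NULL, NULL);
}

/* From any other thread: */
static void wake_loop(void) {
  uv_async_send(&async_handle);
}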
+
+/* Request handle to be closed. close_cb will be called
+ * asynchronously after this call.
+ */
+int uv_close(uv_handle_t* handle);
+
+
+/* Utility */
+struct sockaddr_in uv_ip4_addr(char* ip, int port);
+
+#endif /* UV_H */