commit 2ef3c6c632 (parent b4e9d3c337)

    oio -> uv
 LICENSE | 8 ++++----

@@ -1,5 +1,5 @@
-liboio is part of the Node project: http://nodejs.org/
-liboio may be distributed alone under Node's license:
+libuv is part of the Node project: http://nodejs.org/
+libuv may be distributed alone under Node's license:
 
 ====
 
@@ -24,10 +24,10 @@ IN THE SOFTWARE.
 
 ====
 
-This license applies to all parts of liboio that are not externally
+This license applies to all parts of libuv that are not externally
 maintained libraries.
 
-The externally maintained libraries used by liboio are:
+The externally maintained libraries used by libuv are:
 
 - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
 
 Makefile | 10 +++++-----

@@ -26,15 +26,15 @@ else
 include config-unix.mk
 endif
 
-all: oio.a test/run-tests test/run-benchmarks
+all: uv.a test/run-tests test/run-benchmarks
 
-test/run-tests$(E): test/*.h test/run-tests.c $(RUNNER_SRC) test/runner-unix.c $(TESTS) oio.a
+test/run-tests$(E): test/*.h test/run-tests.c $(RUNNER_SRC) test/runner-unix.c $(TESTS) uv.a
 	$(CC) $(RUNNER_CFLAGS) $(RUNNER_LINKFLAGS) -o test/run-tests test/run-tests.c \
-		test/runner.c $(RUNNER_SRC) $(TESTS) oio.a $(RUNNER_LIBS)
+		test/runner.c $(RUNNER_SRC) $(TESTS) uv.a $(RUNNER_LIBS)
 
-test/run-benchmarks$(E): test/*.h test/run-benchmarks.c test/runner.c $(RUNNER_SRC) $(BENCHMARKS) oio.a
+test/run-benchmarks$(E): test/*.h test/run-benchmarks.c test/runner.c $(RUNNER_SRC) $(BENCHMARKS) uv.a
 	$(CC) $(RUNNER_CFLAGS) $(RUNNER_LINKFLAGS) -o test/run-benchmarks test/run-benchmarks.c \
-		test/runner.c $(RUNNER_SRC) $(BENCHMARKS) oio.a $(RUNNER_LIBS)
+		test/runner.c $(RUNNER_SRC) $(BENCHMARKS) uv.a $(RUNNER_LIBS)
 
 test/echo.o: test/echo.c test/echo.h
 	$(CC) $(CFLAGS) -c test/echo.c -o test/echo.o
 README | 4 +++-

@@ -4,6 +4,8 @@ all platform differences in this library.
 
 http://nodejs.org/
 
+(This was previously called liboio)
+
 ## Supported Platforms
 
 Microsoft Windows operating systems since Windows XP sp2. It can be built
@@ -20,7 +22,7 @@ on all operating systems. This is a large undertaking. Some of the API
 decisions may seem arbitrary but are actually borne out of various specific
 platform issues.
 
-## oio_read_start(), oio_read_stop()
+## uv_read_start(), uv_read_stop()
 
 Originally we wanted to provide an asynchronous read function that was
 similar to WSARecv().
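For orientation, the renamed read API composes like this. This is a minimal
sketch assembled only from signatures that appear elsewhere in this commit
(uv_init registers the allocator; handle setup and error paths are elided):

    #include <stdlib.h>
    #include "uv.h"

    /* Registered once via uv_init(on_alloc); the loop requests a buffer,
     * then hands it back in the read callback. */
    static uv_buf on_alloc(uv_handle_t* handle, size_t suggested_size) {
      uv_buf buf;
      buf.base = (char*) malloc(suggested_size);
      buf.len = suggested_size;
      return buf;
    }

    static void on_read(uv_handle_t* handle, int nread, uv_buf buf) {
      if (nread == -1) {
        /* UV_EOF or an error; details come from uv_last_error(). */
        uv_read_stop(handle);
      }
      if (buf.base)
        free(buf.base);  /* the callback owns the buffer */
    }

    /* After a successful connect/accept: uv_read_start(handle, on_read); */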
@@ -34,11 +34,11 @@ RUNNER_LINKFLAGS=$(LINKFLAGS)
 RUNNER_LIBS=-lws2_32
 RUNNER_SRC=test/runner-win.c
 
-oio.a: oio-win.o
-	$(AR) rcs oio.a oio-win.o
+uv.a: uv-win.o
+	$(AR) rcs uv.a uv-win.o
 
-oio-win.o: oio-win.c oio.h oio-win.h
-	$(CC) $(CFLAGS) -c oio-win.c -o oio-win.o
+uv-win.o: uv-win.c uv.h uv-win.h
+	$(CC) $(CFLAGS) -c uv-win.c -o uv-win.o
 
 distclean-platform:
 clean-platform:
@@ -35,11 +35,11 @@ RUNNER_LINKFLAGS=$(LINKFLAGS) -pthread
 RUNNER_LIBS=
 RUNNER_SRC=test/runner-unix.c
 
-oio.a: oio-unix.o ev/ev.o
-	$(AR) rcs oio.a oio-unix.o ev/ev.o
+uv.a: uv-unix.o ev/ev.o
+	$(AR) rcs uv.a uv-unix.o ev/ev.o
 
-oio-unix.o: oio-unix.c oio.h oio-unix.h
-	$(CC) $(CFLAGS) -c oio-unix.c -o oio-unix.o
+uv-unix.o: uv-unix.c uv.h uv-unix.h
+	$(CC) $(CFLAGS) -c uv-unix.c -o uv-unix.o
 
 ev/ev.o: ev/config.h ev/ev.c
 	$(MAKE) -C ev
@@ -162,11 +162,11 @@
     <ClInclude Include="..\test\task.h" />
   </ItemGroup>
   <ItemGroup>
-    <ProjectReference Include="liboio.vcxproj">
+    <ProjectReference Include="libuv.vcxproj">
       <Project>{301fe650-cd34-14e5-6b63-42e383fa02bc}</Project>
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
 </Project>
@@ -171,11 +171,11 @@
     <ClInclude Include="..\test\task.h" />
   </ItemGroup>
   <ItemGroup>
-    <ProjectReference Include="liboio.vcxproj">
+    <ProjectReference Include="libuv.vcxproj">
      <Project>{301fe650-cd34-14e5-6b63-42e383fa02bc}</Project>
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
 </Project>
@@ -1,11 +1,11 @@
 
 Microsoft Visual Studio Solution File, Format Version 11.00
 # Visual Studio 2010
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio", "liboio.vcxproj", "{301FE650-CD34-14E5-6B63-42E383FA02BC}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv", "libuv.vcxproj", "{301FE650-CD34-14E5-6B63-42E383FA02BC}"
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio-test", "liboio-test.vcxproj", "{1D7C3F6C-A4AF-DD73-2D20-B2FC919B3744}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv-test", "libuv-test.vcxproj", "{1D7C3F6C-A4AF-DD73-2D20-B2FC919B3744}"
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liboio-benchmark", "liboio-benchmark.vcxproj", "{65312F30-3B19-A87E-E8D1-491D0F4A6536}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libuv-benchmark", "libuv-benchmark.vcxproj", "{65312F30-3B19-A87E-E8D1-491D0F4A6536}"
 EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -101,24 +101,24 @@
   </ItemDefinitionGroup>
   <ItemGroup>
     <ClInclude Include="..\ngx-queue.h" />
-    <ClInclude Include="..\oio-unix.h">
+    <ClInclude Include="..\uv-unix.h">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
     </ClInclude>
-    <ClInclude Include="..\oio-win.h" />
-    <ClInclude Include="..\oio.h" />
+    <ClInclude Include="..\uv-win.h" />
+    <ClInclude Include="..\uv.h" />
     <ClInclude Include="..\tree.h" />
   </ItemGroup>
   <ItemGroup>
-    <ClCompile Include="..\oio-unix.c">
+    <ClCompile Include="..\uv-unix.c">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
     </ClCompile>
-    <ClCompile Include="..\oio-win.c" />
+    <ClCompile Include="..\uv-win.c" />
   </ItemGroup>
   <ItemGroup>
     <None Include="..\AUTHORS" />
@@ -130,4 +130,4 @@
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
 </Project>
 oio.h | 271 -------------------------------------------------------------

@@ -1,271 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef OIO_H
-#define OIO_H
-
-#define OIO_VERSION_MAJOR 0
-#define OIO_VERSION_MINOR 1
-
-#include <stdint.h> /* int64_t */
-#include <sys/types.h> /* size_t */
-
-typedef struct oio_err_s oio_err_t;
-typedef struct oio_handle_s oio_handle_t;
-typedef struct oio_req_s oio_req_t;
-
-
-#if defined(__unix__) || defined(__POSIX__) || defined(__APPLE__)
-# include "oio-unix.h"
-#else
-# include "oio-win.h"
-#endif
-
-
-/* The status parameter is 0 if the request completed successfully,
- * and should be -1 if the request was cancelled or failed.
- * For oio_close_cb, -1 means that the handle was closed due to an error.
- * Error details can be obtained by calling oio_last_error().
- *
- * In the case of oio_read_cb the oio_buf returned should be freed by the
- * user.
- */
-typedef oio_buf (*oio_alloc_cb)(oio_handle_t* handle, size_t suggested_size);
-typedef void (*oio_read_cb)(oio_handle_t* handle, int nread, oio_buf buf);
-typedef void (*oio_write_cb)(oio_req_t* req, int status);
-typedef void (*oio_connect_cb)(oio_req_t* req, int status);
-typedef void (*oio_shutdown_cb)(oio_req_t* req, int status);
-typedef void (*oio_accept_cb)(oio_handle_t* handle);
-typedef void (*oio_close_cb)(oio_handle_t* handle, int status);
-typedef void (*oio_timer_cb)(oio_req_t* req, int64_t skew, int status);
-/* TODO: do loop_cb and async_cb really need a status argument? */
-typedef void (*oio_loop_cb)(oio_handle_t* handle, int status);
-typedef void (*oio_async_cb)(oio_handle_t* handle, int status);
-
-
-/* Expand this list if necessary. */
-typedef enum {
-  OIO_UNKNOWN = -1,
-  OIO_OK = 0,
-  OIO_EOF,
-  OIO_EACCESS,
-  OIO_EAGAIN,
-  OIO_EADDRINUSE,
-  OIO_EADDRNOTAVAIL,
-  OIO_EAFNOSUPPORT,
-  OIO_EALREADY,
-  OIO_EBADF,
-  OIO_EBUSY,
-  OIO_ECONNABORTED,
-  OIO_ECONNREFUSED,
-  OIO_ECONNRESET,
-  OIO_EDESTADDRREQ,
-  OIO_EFAULT,
-  OIO_EHOSTUNREACH,
-  OIO_EINTR,
-  OIO_EINVAL,
-  OIO_EISCONN,
-  OIO_EMFILE,
-  OIO_ENETDOWN,
-  OIO_ENETUNREACH,
-  OIO_ENFILE,
-  OIO_ENOBUFS,
-  OIO_ENOMEM,
-  OIO_ENONET,
-  OIO_ENOPROTOOPT,
-  OIO_ENOTCONN,
-  OIO_ENOTSOCK,
-  OIO_ENOTSUP,
-  OIO_EPROTO,
-  OIO_EPROTONOSUPPORT,
-  OIO_EPROTOTYPE,
-  OIO_ETIMEDOUT
-} oio_err_code;
-
-typedef enum {
-  OIO_UNKNOWN_HANDLE = 0,
-  OIO_TCP,
-  OIO_NAMED_PIPE,
-  OIO_TTY,
-  OIO_FILE,
-  OIO_PREPARE,
-  OIO_CHECK,
-  OIO_IDLE,
-  OIO_ASYNC
-} oio_handle_type;
-
-typedef enum {
-  OIO_UNKNOWN_REQ = 0,
-  OIO_CONNECT,
-  OIO_ACCEPT,
-  OIO_READ,
-  OIO_WRITE,
-  OIO_SHUTDOWN,
-  OIO_TIMEOUT,
-  OIO_WAKEUP
-} oio_req_type;
-
-
-struct oio_err_s {
-  /* read-only */
-  oio_err_code code;
-  /* private */
-  int sys_errno_;
-};
-
-
-struct oio_req_s {
-  /* read-only */
-  oio_req_type type;
-  /* public */
-  oio_handle_t* handle;
-  void* cb;
-  void* data;
-  /* private */
-  oio_req_private_fields
-};
-
-
-struct oio_handle_s {
-  /* read-only */
-  oio_handle_type type;
-  /* public */
-  oio_close_cb close_cb;
-  void* data;
-  /* number of bytes queued for writing */
-  size_t write_queue_size;
-  /* private */
-  oio_handle_private_fields
-};
-
-
-/* Most functions return boolean: 0 for success and -1 for failure.
- * On error the user should then call oio_last_error() to determine
- * the error code.
- */
-oio_err_t oio_last_error();
-char* oio_strerror(oio_err_t err);
-
-
-void oio_init(oio_alloc_cb alloc);
-int oio_run();
-
-/* Manually modify the event loop's reference count. Useful if the user wants
- * to have a handle or timeout that doesn't keep the loop alive.
- */
-void oio_ref();
-void oio_unref();
-
-void oio_update_time();
-int64_t oio_now();
-
-void oio_req_init(oio_req_t* req, oio_handle_t* handle, void* cb);
-
-/*
- * TODO:
- * - oio_(pipe|pipe_tty)_handle_init
- * - oio_bind_pipe(char* name)
- * - oio_continuous_read(oio_handle_t* handle, oio_continuous_read_cb* cb)
- * - A way to list cancelled oio_reqs after before/on oio_close_cb
- */
-
-/* TCP socket methods.
- * Handle and callback must be set by calling oio_req_init.
- */
-int oio_tcp_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_bind(oio_handle_t* handle, struct sockaddr* addr);
-
-int oio_connect(oio_req_t* req, struct sockaddr* addr);
-int oio_shutdown(oio_req_t* req);
-
-/* TCP server methods. */
-int oio_listen(oio_handle_t* handle, int backlog, oio_accept_cb cb);
-
-/* Call this after accept_cb. client does not need to be initialized. */
-int oio_accept(oio_handle_t* server, oio_handle_t* client,
-    oio_close_cb close_cb, void* data);
-
-
-/* Read data from an incoming stream. The callback will be made several
- * times until there is no more data to read or oio_read_stop is
- * called. When we've reached EOF nread will be set to -1 and the error is
- * set to OIO_EOF. When nread == -1 the buf parameter might not point to a
- * valid buffer; in that case buf.len and buf.base are both set to 0.
- * Note that nread might also be 0, which does *not* indicate an error or
- * eof; it happens when liboio requested a buffer through the alloc callback
- * but then decided that it didn't need that buffer.
- */
-int oio_read_start(oio_handle_t* handle, oio_read_cb cb);
-int oio_read_stop(oio_handle_t* handle);
-
-int oio_write(oio_req_t* req, oio_buf bufs[], int bufcnt);
-
-/* Timer methods */
-int oio_timeout(oio_req_t* req, int64_t timeout);
-
-/* libev wrapper. Every active prepare handle gets its callback called
- * exactly once per loop iteration, just before the system blocks to wait
- * for completed i/o.
- */
-int oio_prepare_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_prepare_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_prepare_stop(oio_handle_t* handle);
-
-/* libev wrapper. Every active check handle gets its callback called exactly
- * once per loop iteration, just after the system returns from blocking.
- */
-int oio_check_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_check_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_check_stop(oio_handle_t* handle);
-
-/* libev wrapper. Every active idle handle gets its callback called
- * repeatedly until it is stopped. This happens after all other types of
- * callbacks are processed. When there are multiple "idle" handles active,
- * their callbacks are called in turn.
- */
-int oio_idle_init(oio_handle_t* handle, oio_close_cb close_cb, void* data);
-int oio_idle_start(oio_handle_t* handle, oio_loop_cb cb);
-int oio_idle_stop(oio_handle_t* handle);
-
-/* libev wrapper. oio_async_send wakes up the event loop and calls the async
- * handle's callback. There is no guarantee that every oio_async_send call
- * leads to exactly one invocation of the callback; the only guarantee is
- * that the callback function is called at least once after the call to
- * async_send. Unlike everything else, oio_async_send can be called from
- * another thread.
- *
- * QUESTION(ryan) Can OIO_ASYNC just use oio_loop_cb? Same signature on my
- * side.
- */
-int oio_async_init(oio_handle_t* handle, oio_async_cb async_cb,
-    oio_close_cb close_cb, void* data);
-int oio_async_send(oio_handle_t* handle);
-
-/* Request handle to be closed. close_cb will be called
- * asynchronously after this call.
- */
-int oio_close(oio_handle_t* handle);
-
-
-/* Utility */
-struct sockaddr_in oio_ip4_addr(char* ip, int port);
-
-#endif /* OIO_H */
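The header reappears as uv.h with every identifier moved to the uv_ prefix
(the new file is not shown in this excerpt, but the tests below include
"../uv.h"). The calling convention is unchanged: 0 for success, -1 for
failure, details via uv_last_error(). A minimal sketch of that pattern,
using only functions visible in this commit:

    #include <stdio.h>
    #include "uv.h"

    /* Bind a TCP handle and report any failure, the liboio way, renamed. */
    static int bind_or_report(uv_handle_t* server, struct sockaddr* addr) {
      if (uv_bind(server, addr) == -1) {
        uv_err_t err = uv_last_error();
        fprintf(stderr, "uv_bind: %s\n", uv_strerror(err));
        return -1;
      }
      return 0;
    }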
@@ -19,7 +19,7 @@
  * IN THE SOFTWARE.
  */
 
-#include "../oio.h"
+#include "../uv.h"
 #include "task.h"
 
 #include <stdlib.h>
@@ -33,13 +33,13 @@
 typedef struct {
   int pongs;
   int state;
-  oio_handle_t handle;
-  oio_req_t connect_req;
-  oio_req_t shutdown_req;
+  uv_handle_t handle;
+  uv_req_t connect_req;
+  uv_req_t shutdown_req;
 } pinger_t;
 
 typedef struct buf_s {
-  oio_buf oio_buf;
+  uv_buf uv_buf;
   struct buf_s* next;
 } buf_t;
 
@@ -52,33 +52,33 @@ static int completed_pingers = 0;
 static int64_t start_time;
 
 
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size) {
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size) {
   buf_t* ab;
 
   ab = buf_freelist;
 
   if (ab != NULL) {
     buf_freelist = ab->next;
-    return ab->oio_buf;
+    return ab->uv_buf;
   }
 
   ab = (buf_t*) malloc(size + sizeof *ab);
-  ab->oio_buf.len = size;
-  ab->oio_buf.base = ((char*) ab) + sizeof *ab;
+  ab->uv_buf.len = size;
+  ab->uv_buf.base = ((char*) ab) + sizeof *ab;
 
-  return ab->oio_buf;
+  return ab->uv_buf;
 }
 
 
-static void buf_free(oio_buf oio_buf) {
-  buf_t* ab = (buf_t*) (oio_buf.base - sizeof *ab);
+static void buf_free(uv_buf uv_buf) {
+  buf_t* ab = (buf_t*) (uv_buf.base - sizeof *ab);
 
   ab->next = buf_freelist;
   buf_freelist = ab;
 }
 
 
-static void pinger_close_cb(oio_handle_t* handle, int status) {
+static void pinger_close_cb(uv_handle_t* handle, int status) {
   pinger_t* pinger;
 
   ASSERT(status == 0);
@@ -92,7 +92,7 @@ static void pinger_close_cb(oio_handle_t* handle, int status) {
 }
 
 
-static void pinger_write_cb(oio_req_t* req, int status) {
+static void pinger_write_cb(uv_req_t* req, int status) {
   ASSERT(status == 0);
 
   free(req);
@@ -100,34 +100,34 @@ static void pinger_write_cb(oio_req_t* req, int status) {
 
 
 static void pinger_write_ping(pinger_t* pinger) {
-  oio_req_t* req;
-  oio_buf buf;
+  uv_req_t* req;
+  uv_buf buf;
 
   buf.base = (char*)&PING;
   buf.len = strlen(PING);
 
-  req = (oio_req_t*)malloc(sizeof(*req));
-  oio_req_init(req, &pinger->handle, pinger_write_cb);
+  req = (uv_req_t*)malloc(sizeof(*req));
+  uv_req_init(req, &pinger->handle, pinger_write_cb);
 
-  if (oio_write(req, &buf, 1)) {
-    FATAL("oio_write failed");
+  if (uv_write(req, &buf, 1)) {
+    FATAL("uv_write failed");
   }
 }
 
 
-static void pinger_shutdown_cb(oio_handle_t* handle, int status) {
+static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
   ASSERT(status == 0);
 }
 
 
-static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
+static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
   unsigned int i;
   pinger_t* pinger;
 
   pinger = (pinger_t*)handle->data;
 
   if (nread < 0) {
-    ASSERT(oio_last_error().code == OIO_EOF);
+    ASSERT(uv_last_error().code == UV_EOF);
 
     if (buf.base) {
       buf_free(buf);
@@ -142,9 +142,9 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
     pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
     if (pinger->state == 0) {
       pinger->pongs++;
-      if (oio_now() - start_time > TIME) {
-        oio_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
-        oio_shutdown(&pinger->shutdown_req);
+      if (uv_now() - start_time > TIME) {
+        uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb);
+        uv_shutdown(&pinger->shutdown_req);
         break;
         return;
       } else {
@@ -157,23 +157,23 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
 }
 
 
-static void pinger_connect_cb(oio_req_t* req, int status) {
+static void pinger_connect_cb(uv_req_t* req, int status) {
   pinger_t* pinger = (pinger_t*)req->handle->data;
 
   ASSERT(status == 0);
 
   pinger_write_ping(pinger);
 
-  if (oio_read_start(req->handle, pinger_read_cb)) {
-    FATAL("oio_read_start failed");
+  if (uv_read_start(req->handle, pinger_read_cb)) {
+    FATAL("uv_read_start failed");
   }
 }
 
 
 static void pinger_new() {
   int r;
-  struct sockaddr_in client_addr = oio_ip4_addr("0.0.0.0", 0);
-  struct sockaddr_in server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
+  struct sockaddr_in client_addr = uv_ip4_addr("0.0.0.0", 0);
+  struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
   pinger_t* pinger;
 
   pinger = (pinger_t*)malloc(sizeof(*pinger));
@@ -181,25 +181,25 @@ static void pinger_new() {
   pinger->pongs = 0;
 
   /* Try to connect to the server and do NUM_PINGS ping-pongs. */
-  r = oio_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
+  r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger);
   ASSERT(!r);
 
   /* We are never doing multiple reads/connects at a time anyway, */
   /* so these handles can be pre-initialized. */
-  oio_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
+  uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb);
 
-  oio_bind(&pinger->handle, (struct sockaddr*)&client_addr);
-  r = oio_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
+  uv_bind(&pinger->handle, (struct sockaddr*)&client_addr);
+  r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
   ASSERT(!r);
 }
 
 
 BENCHMARK_IMPL(ping_pongs) {
-  oio_init(buf_alloc);
-  start_time = oio_now();
+  uv_init(buf_alloc);
+  start_time = uv_now();
 
   pinger_new();
-  oio_run();
+  uv_run();
 
   ASSERT(completed_pingers == 1);
 
@@ -20,7 +20,7 @@
  */
 
 #include "task.h"
-#include "../oio.h"
+#include "../uv.h"
 
 #include <math.h>
 #include <stdio.h>
@@ -35,14 +35,14 @@
 #define STATS_COUNT 5
 
 
-static void do_write(oio_handle_t* handle);
+static void do_write(uv_handle_t* handle);
 static void maybe_connect_some();
 
-static oio_req_t* req_alloc();
-static void req_free(oio_req_t* oio_req);
+static uv_req_t* req_alloc();
+static void req_free(uv_req_t* uv_req);
 
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size);
-static void buf_free(oio_buf oio_buf);
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size);
+static void buf_free(uv_buf uv_buf);
 
 
 static struct sockaddr_in server_addr;
@@ -62,8 +62,8 @@ static int stats_left = 0;
 
 static char write_buffer[WRITE_BUFFER_SIZE];
 
-static oio_handle_t read_handles[TARGET_CONNECTIONS];
-static oio_handle_t write_handles[TARGET_CONNECTIONS];
+static uv_handle_t read_handles[TARGET_CONNECTIONS];
+static uv_handle_t write_handles[TARGET_CONNECTIONS];
 
 
 static double gbit(int64_t bytes, int64_t passed_ms) {
@@ -72,7 +72,7 @@ static double gbit(int64_t bytes, int64_t passed_ms) {
 }
 
 
-static void show_stats(oio_req_t* req, int64_t skew, int status) {
+static void show_stats(uv_req_t* req, int64_t skew, int status) {
   int64_t msec = STATS_INTERVAL + skew;
 
 #if PRINT_STATS
@@ -98,30 +98,30 @@ static void show_stats(oio_req_t* req, int64_t skew, int status) {
   nrecv = 0;
   nsent = 0;
 
-  oio_timeout(req, (STATS_INTERVAL - skew > 0)
+  uv_timeout(req, (STATS_INTERVAL - skew > 0)
       ? STATS_INTERVAL - skew
      : 0);
 }
 
 
 static void start_stats_collection() {
-  oio_req_t* req = req_alloc();
+  uv_req_t* req = req_alloc();
   int r;
 
   /* Show-stats timeout */
   stats_left = STATS_COUNT;
-  oio_req_init(req, NULL, (void*)show_stats);
-  r = oio_timeout(req, STATS_INTERVAL);
+  uv_req_init(req, NULL, (void*)show_stats);
+  r = uv_timeout(req, STATS_INTERVAL);
   ASSERT(r == 0);
 }
 
 
-void close_cb(oio_handle_t* handle, int status) {
+void close_cb(uv_handle_t* handle, int status) {
   ASSERT(status == 0);
 }
 
 
-static void read_cb(oio_handle_t* handle, int bytes, oio_buf buf) {
+static void read_cb(uv_handle_t* handle, int bytes, uv_buf buf) {
   ASSERT(bytes >= 0);
 
   buf_free(buf);
@@ -131,8 +131,8 @@ static void read_cb(oio_handle_t* handle, int bytes, oio_buf buf) {
 }
 
 
-static void write_cb(oio_req_t* req, int status) {
-  oio_buf* buf = (oio_buf*) req->data;
+static void write_cb(uv_req_t* req, int status) {
+  uv_buf* buf = (uv_buf*) req->data;
 
   ASSERT(status == 0);
 
@@ -145,9 +145,9 @@ static void write_cb(oio_req_t* req, int status) {
 }
 
 
-static void do_write(oio_handle_t* handle) {
-  oio_req_t* req;
-  oio_buf buf;
+static void do_write(uv_handle_t* handle) {
+  uv_req_t* req;
+  uv_buf buf;
   int r;
 
   buf.base = (char*) &write_buffer;
@@ -155,9 +155,9 @@ static void do_write(oio_handle_t* handle) {
 
   while (handle->write_queue_size == 0) {
     req = req_alloc();
-    oio_req_init(req, handle, write_cb);
+    uv_req_init(req, handle, write_cb);
 
-    r = oio_write(req, &buf, 1);
+    r = uv_write(req, &buf, 1);
     ASSERT(r == 0);
   }
 }
@@ -177,8 +177,8 @@ static void maybe_start_writing() {
 }
 
 
-static void connect_cb(oio_req_t* req, int status) {
-  if (status) LOG(oio_strerror(oio_last_error()));
+static void connect_cb(uv_req_t* req, int status) {
+  if (status) LOG(uv_strerror(uv_last_error()));
   ASSERT(status == 0);
 
   write_sockets++;
@@ -189,16 +189,16 @@ static void connect_cb(oio_req_t* req, int status) {
 }
 
 
-static void do_connect(oio_handle_t* handle, struct sockaddr* addr) {
-  oio_req_t* req;
+static void do_connect(uv_handle_t* handle, struct sockaddr* addr) {
+  uv_req_t* req;
   int r;
 
-  r = oio_tcp_init(handle, close_cb, NULL);
+  r = uv_tcp_init(handle, close_cb, NULL);
   ASSERT(r == 0);
 
   req = req_alloc();
-  oio_req_init(req, handle, connect_cb);
-  r = oio_connect(req, addr);
+  uv_req_init(req, handle, connect_cb);
+  r = uv_connect(req, addr);
   ASSERT(r == 0);
 }
 
@@ -212,17 +212,17 @@ static void maybe_connect_some() {
 }
 
 
-static void accept_cb(oio_handle_t* server) {
-  oio_handle_t* handle;
+static void accept_cb(uv_handle_t* server) {
+  uv_handle_t* handle;
   int r;
 
   ASSERT(read_sockets < TARGET_CONNECTIONS);
   handle = &read_handles[read_sockets];
 
-  r = oio_accept(server, handle, close_cb, NULL);
+  r = uv_accept(server, handle, close_cb, NULL);
   ASSERT(r == 0);
 
-  r = oio_read_start(handle, read_cb);
+  r = uv_read_start(handle, read_cb);
   ASSERT(r == 0);
 
   read_sockets++;
@@ -232,18 +232,18 @@ static void accept_cb(oio_handle_t* server) {
 
 
 BENCHMARK_IMPL(pump) {
-  oio_handle_t server;
+  uv_handle_t server;
   int r;
 
-  oio_init(buf_alloc);
+  uv_init(buf_alloc);
 
   /* Server */
-  server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
-  r = oio_tcp_init(&server, close_cb, NULL);
+  server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
+  r = uv_tcp_init(&server, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &server_addr);
+  r = uv_bind(&server, (struct sockaddr*) &server_addr);
   ASSERT(r == 0);
-  r = oio_listen(&server, TARGET_CONNECTIONS, accept_cb);
+  r = uv_listen(&server, TARGET_CONNECTIONS, accept_cb);
   ASSERT(r == 0);
 
   oio_update_time();
@@ -252,7 +252,7 @@ BENCHMARK_IMPL(pump) {
   /* Start making connections */
   maybe_connect_some();
 
-  oio_run();
+  uv_run();
 
   return 0;
 }
@@ -263,7 +263,7 @@ BENCHMARK_IMPL(pump) {
  */
 
 typedef struct req_list_s {
-  oio_req_t oio_req;
+  uv_req_t uv_req;
   struct req_list_s* next;
 } req_list_t;
 
@@ -271,22 +271,22 @@ typedef struct req_list_s {
 static req_list_t* req_freelist = NULL;
 
 
-static oio_req_t* req_alloc() {
+static uv_req_t* req_alloc() {
   req_list_t* req;
 
   req = req_freelist;
   if (req != NULL) {
     req_freelist = req->next;
-    return (oio_req_t*) req;
+    return (uv_req_t*) req;
   }
 
   req = (req_list_t*) malloc(sizeof *req);
-  return (oio_req_t*) req;
+  return (uv_req_t*) req;
 }
 
 
-static void req_free(oio_req_t* oio_req) {
-  req_list_t* req = (req_list_t*) oio_req;
+static void req_free(uv_req_t* uv_req) {
+  req_list_t* req = (req_list_t*) uv_req;
 
   req->next = req_freelist;
   req_freelist = req;
@@ -298,7 +298,7 @@ static void req_free(oio_req_t* oio_req) {
  */
 
 typedef struct buf_list_s {
-  oio_buf oio_buf;
+  uv_buf uv_buf;
   struct buf_list_s* next;
 } buf_list_t;
 
@@ -306,25 +306,25 @@ typedef struct buf_list_s {
 static buf_list_t* buf_freelist = NULL;
 
 
-static oio_buf buf_alloc(oio_handle_t* handle, size_t size) {
+static uv_buf buf_alloc(uv_handle_t* handle, size_t size) {
   buf_list_t* buf;
 
   buf = buf_freelist;
   if (buf != NULL) {
     buf_freelist = buf->next;
-    return buf->oio_buf;
+    return buf->uv_buf;
   }
 
   buf = (buf_list_t*) malloc(size + sizeof *buf);
-  buf->oio_buf.len = (unsigned int)size;
-  buf->oio_buf.base = ((char*) buf) + sizeof *buf;
+  buf->uv_buf.len = (unsigned int)size;
+  buf->uv_buf.base = ((char*) buf) + sizeof *buf;
 
-  return buf->oio_buf;
+  return buf->uv_buf;
 }
 
 
-static void buf_free(oio_buf oio_buf) {
-  buf_list_t* buf = (buf_list_t*) (oio_buf.base - sizeof *buf);
+static void buf_free(uv_buf uv_buf) {
+  buf_list_t* buf = (buf_list_t*) (uv_buf.base - sizeof *buf);
 
   buf->next = buf_freelist;
   buf_freelist = buf;
@@ -20,11 +20,11 @@
  */
 
 #include "task.h"
-#include "../oio.h"
+#include "../uv.h"
 
 
 BENCHMARK_IMPL(sizes) {
-  LOGF("oio_handle_t: %lu bytes\n", sizeof(oio_handle_t));
-  LOGF("oio_req_t: %lu bytes\n", sizeof(oio_req_t));
+  LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
+  LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
   return 0;
 }
@@ -19,33 +19,33 @@
  * IN THE SOFTWARE.
  */
 
-#include "../oio.h"
+#include "../uv.h"
 #include "task.h"
 #include <stdio.h>
 #include <stdlib.h>
 
 
 typedef struct {
-  oio_req_t req;
-  oio_buf buf;
+  uv_req_t req;
+  uv_buf buf;
 } write_req_t;
 
 
-static oio_handle_t server;
+static uv_handle_t server;
 
 
-static void after_write(oio_req_t* req, int status);
-static void after_read(oio_handle_t* handle, int nread, oio_buf buf);
-static void on_close(oio_handle_t* peer, int status);
-static void on_accept(oio_handle_t* handle);
+static void after_write(uv_req_t* req, int status);
+static void after_read(uv_handle_t* handle, int nread, uv_buf buf);
+static void on_close(uv_handle_t* peer, int status);
+static void on_accept(uv_handle_t* handle);
 
 
-static void after_write(oio_req_t* req, int status) {
+static void after_write(uv_req_t* req, int status) {
   write_req_t* wr;
 
   if (status) {
-    oio_err_t err = oio_last_error();
-    fprintf(stderr, "oio_write error: %s\n", oio_strerror(err));
+    uv_err_t err = uv_last_error();
+    fprintf(stderr, "uv_write error: %s\n", uv_strerror(err));
     ASSERT(0);
   }
 
@@ -57,26 +57,26 @@ static void after_write(oio_req_t* req, int status) {
 }
 
 
-static void after_shutdown(oio_req_t* req, int status) {
+static void after_shutdown(uv_req_t* req, int status) {
   free(req);
 }
 
 
-static void after_read(oio_handle_t* handle, int nread, oio_buf buf) {
+static void after_read(uv_handle_t* handle, int nread, uv_buf buf) {
   write_req_t* wr;
-  oio_req_t* req;
+  uv_req_t* req;
 
   if (nread < 0) {
     /* Error or EOF */
-    ASSERT(oio_last_error().code == OIO_EOF);
+    ASSERT(uv_last_error().code == UV_EOF);
 
     if (buf.base) {
       free(buf.base);
     }
 
-    req = (oio_req_t*) malloc(sizeof *req);
-    oio_req_init(req, handle, after_shutdown);
-    oio_shutdown(req);
+    req = (uv_req_t*) malloc(sizeof *req);
+    uv_req_init(req, handle, after_shutdown);
+    uv_shutdown(req);
 
     return;
   }
@@ -89,58 +89,58 @@ static void after_read(oio_handle_t* handle, int nread, oio_buf buf) {
 
   wr = (write_req_t*) malloc(sizeof *wr);
 
-  oio_req_init(&wr->req, handle, after_write);
+  uv_req_init(&wr->req, handle, after_write);
   wr->buf.base = buf.base;
   wr->buf.len = nread;
-  if (oio_write(&wr->req, &wr->buf, 1)) {
-    FATAL("oio_write failed");
+  if (uv_write(&wr->req, &wr->buf, 1)) {
+    FATAL("uv_write failed");
   }
 }
 
 
-static void on_close(oio_handle_t* peer, int status) {
+static void on_close(uv_handle_t* peer, int status) {
   if (status != 0) {
     fprintf(stdout, "Socket error\n");
   }
 }
 
 
-static void on_accept(oio_handle_t* server) {
-  oio_handle_t* handle = (oio_handle_t*) malloc(sizeof *handle);
+static void on_accept(uv_handle_t* server) {
+  uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle);
 
-  if (oio_accept(server, handle, on_close, NULL)) {
-    FATAL("oio_accept failed");
+  if (uv_accept(server, handle, on_close, NULL)) {
+    FATAL("uv_accept failed");
   }
 
-  oio_read_start(handle, after_read);
+  uv_read_start(handle, after_read);
 }
 
 
-static void on_server_close(oio_handle_t* handle, int status) {
+static void on_server_close(uv_handle_t* handle, int status) {
   ASSERT(handle == &server);
   ASSERT(status == 0);
 }
 
 
 static int echo_start(int port) {
-  struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", port);
+  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", port);
   int r;
 
-  r = oio_tcp_init(&server, on_server_close, NULL);
+  r = uv_tcp_init(&server, on_server_close, NULL);
   if (r) {
     /* TODO: Error codes */
     fprintf(stderr, "Socket creation error\n");
     return 1;
   }
 
-  r = oio_bind(&server, (struct sockaddr*) &addr);
+  r = uv_bind(&server, (struct sockaddr*) &addr);
   if (r) {
     /* TODO: Error codes */
     fprintf(stderr, "Bind error\n");
     return 1;
   }
 
-  r = oio_listen(&server, 128, on_accept);
+  r = uv_listen(&server, 128, on_accept);
   if (r) {
     /* TODO: Error codes */
     fprintf(stderr, "Listen error\n");
@@ -152,12 +152,12 @@ static int echo_start(int port) {
 
 
 static int echo_stop() {
-  return oio_close(&server);
+  return uv_close(&server);
 }
 
 
-static oio_buf echo_alloc(oio_handle_t* handle, size_t suggested_size) {
-  oio_buf buf;
+static uv_buf echo_alloc(uv_handle_t* handle, size_t suggested_size) {
+  uv_buf buf;
   buf.base = (char*) malloc(suggested_size);
   buf.len = suggested_size;
   return buf;
@@ -165,11 +165,11 @@ static oio_buf echo_alloc(oio_handle_t* handle, size_t suggested_size) {
 
 
 HELPER_IMPL(echo_server) {
-  oio_init(echo_alloc);
+  uv_init(echo_alloc);
   if (echo_start(TEST_PORT))
     return 1;
 
   fprintf(stderr, "Listening!\n");
-  oio_run();
+  uv_run();
   return 0;
 }
@@ -309,12 +309,12 @@
 }
 
 
-typedef void* (*oio_thread_cb)(void* arg);
+typedef void* (*uv_thread_cb)(void* arg);
 
 
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg) {
   pthread_t t;
-  oio_thread_cb cb = (oio_thread_cb)entry;
+  uv_thread_cb cb = (uv_thread_cb)entry;
   int r = pthread_create(&t, NULL, cb, arg);
 
   if (r) {
@@ -328,12 +328,12 @@ uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
 /* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
  * error.
  */
-int oio_wait_thread(uintptr_t thread_id) {
+int uv_wait_thread(uintptr_t thread_id) {
   return pthread_join((pthread_t)thread_id, NULL);
 }
 
 
 /* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec) {
-  usleep(msec);
+void uv_sleep(int msec) {
+  usleep(msec * 1000);  /* usleep takes microseconds */
 }
@@ -62,7 +62,7 @@ int process_start(char *name, process_info_t *p) {
 
   if (GetTempPathW(sizeof(path) / sizeof(WCHAR), (WCHAR*)&path) == 0)
     goto error;
-  if (GetTempFileNameW((WCHAR*)&path, L"oio", 0, (WCHAR*)&filename) == 0)
+  if (GetTempFileNameW((WCHAR*)&path, L"uv", 0, (WCHAR*)&filename) == 0)
     goto error;
 
   file = CreateFileW((WCHAR*)filename,
@@ -283,7 +283,7 @@ static unsigned __stdcall create_thread_helper(void* info) {
 
 
 /* Create a thread. Returns the thread identifier, or 0 on failure. */
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg) {
   uintptr_t result;
   thread_info_t* info;
 
@@ -314,7 +314,7 @@ uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg) {
 /* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
  * error.
  */
-int oio_wait_thread(uintptr_t thread_id) {
+int uv_wait_thread(uintptr_t thread_id) {
   if (WaitForSingleObject((HANDLE)thread_id, INFINITE) != WAIT_OBJECT_0) {
     return -1;
   }
@@ -324,6 +324,6 @@ int oio_wait_thread(uintptr_t thread_id) {
 
 
 /* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec) {
+void uv_sleep(int msec) {
   Sleep(msec);
 }
@@ -76,14 +76,14 @@
 
 
 /* Create a thread. Returns the thread identifier, or 0 on failure. */
-uintptr_t oio_create_thread(void (*entry)(void* arg), void* arg);
+uintptr_t uv_create_thread(void (*entry)(void* arg), void* arg);
 
 /* Wait for a thread to terminate. Should return 0 if the thread ended, -1 on
  * error.
  */
-int oio_wait_thread(uintptr_t thread_id);
+int uv_wait_thread(uintptr_t thread_id);
 
 /* Pause the calling thread for a number of milliseconds. */
-void oio_sleep(int msec);
+void uv_sleep(int msec);
 
 #endif /* TASK_H_ */
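The renamed test-runner thread helpers compose as follows; a minimal sketch
using only the three declarations above (the worker function and its use of
uv_sleep are made up for illustration):

    #include "task.h"

    static void worker(void* arg) {
      uv_sleep(100);  /* hypothetical payload: pause 100 ms */
    }

    static void run_worker_once(void) {
      uintptr_t tid = uv_create_thread(worker, NULL);  /* 0 on failure */
      if (tid != 0)
        uv_wait_thread(tid);  /* 0 once the thread has ended */
    }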
@@ -19,16 +19,16 @@
  * IN THE SOFTWARE.
  */
 
-#include "../oio.h"
+#include "../uv.h"
 #include "task.h"
 #include <stdio.h>
 #include <stdlib.h>
 
 
-static oio_handle_t prepare_handle;
+static uv_handle_t prepare_handle;
 
-static oio_handle_t async1_handle;
-/* static oio_handle_t async2_handle; */
+static uv_handle_t async1_handle;
+/* static uv_handle_t async2_handle; */
 
 static int prepare_cb_called = 0;
 
@@ -49,20 +49,20 @@ static uintptr_t thread3_id = 0;
 void thread1_entry(void *arg) {
   int state = 0;
 
-  oio_sleep(50);
+  uv_sleep(50);
 
   while (1) {
     switch (async1_cb_called) {
       case 0:
-        oio_async_send(&async1_handle);
+        uv_async_send(&async1_handle);
         break;
 
       case 1:
-        oio_async_send(&async1_handle);
+        uv_async_send(&async1_handle);
         break;
 
      case 2:
-        oio_async_send(&async1_handle);
+        uv_async_send(&async1_handle);
         break;
 
       default:
@@ -72,47 +72,47 @@ void thread1_entry(void *arg) {
 }
 
 #if 0
-/* Thread 2 calls oio_async_send on async_handle_2 8 times. */
+/* Thread 2 calls uv_async_send on async_handle_2 8 times. */
 void thread2_entry(void *arg) {
   int i;
 
   while (1) {
     switch (async1_cb_called) {
       case 0:
-        oio_async_send(&async2_handle);
+        uv_async_send(&async2_handle);
         break;
 
       case 1:
-        oio_async_send(&async2_handle);
+        uv_async_send(&async2_handle);
         break;
 
       case 2:
-        oio_async_send(&async2_handle);
+        uv_async_send(&async2_handle);
         break;
     }
-    oio_sleep(5);
+    uv_sleep(5);
   }
 
   if (async1_cb_called == 20) {
-    oio_close(handle);
+    uv_close(handle);
   }
 }
 
 
-/* Thread 3 calls oio_async_send on async_handle_2 8 times
+/* Thread 3 calls uv_async_send on async_handle_2 8 times
  * after waiting half a second first.
  */
 void thread3_entry(void *arg) {
   int i;
 
   for (i = 0; i < 8; i++) {
-    oio_async_send(&async2_handle);
+    uv_async_send(&async2_handle);
   }
 }
 #endif
 
 
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
   ASSERT(handle != NULL);
   ASSERT(status == 0);
 
@@ -120,14 +120,14 @@ static void close_cb(oio_handle_t* handle, int status) {
 }
 
 
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
-  oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+  uv_buf buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
 }
 
 
-static void async1_cb(oio_handle_t* handle, int status) {
+static void async1_cb(uv_handle_t* handle, int status) {
   ASSERT(handle == &async1_handle);
   ASSERT(status == 0);
 
@@ -136,13 +136,13 @@ static void async1_cb(oio_handle_t* handle, int status) {
 
   if (async1_cb_called > 2 && !async1_closed) {
     async1_closed = 1;
-    oio_close(handle);
+    uv_close(handle);
   }
 }
 
 
 #if 0
-static void async2_cb(oio_handle_t* handle, int status) {
+static void async2_cb(uv_handle_t* handle, int status) {
   ASSERT(handle == &async2_handle);
   ASSERT(status == 0);
 
@@ -150,13 +150,13 @@ static void async2_cb(oio_handle_t* handle, int status) {
   printf("async2_cb #%d\n", async2_cb_called);
 
   if (async2_cb_called == 16) {
-    oio_close(handle);
+    uv_close(handle);
   }
 }
 #endif
 
 
-static void prepare_cb(oio_handle_t* handle, int status) {
+static void prepare_cb(uv_handle_t* handle, int status) {
   int r;
 
   ASSERT(handle == &prepare_handle);
@@ -164,24 +164,24 @@ static void prepare_cb(oio_handle_t* handle, int status) {
 
   switch (prepare_cb_called) {
     case 0:
-      thread1_id = oio_create_thread(thread1_entry, NULL);
+      thread1_id = uv_create_thread(thread1_entry, NULL);
       ASSERT(thread1_id != 0);
       break;
 
 #if 0
     case 1:
-      thread2_id = oio_create_thread(thread2_entry, NULL);
+      thread2_id = uv_create_thread(thread2_entry, NULL);
       ASSERT(thread2_id != 0);
       break;
 
    case 2:
-      thread3_id = oio_create_thread(thread3_entry, NULL);
+      thread3_id = uv_create_thread(thread3_entry, NULL);
       ASSERT(thread3_id != 0);
       break;
 #endif
 
     case 1:
-      r = oio_close(handle);
+      r = uv_close(handle);
       ASSERT(r == 0);
       break;
 
@@ -196,30 +196,30 @@ static void prepare_cb(oio_handle_t* handle, int status) {
 TEST_IMPL(async) {
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_prepare_init(&prepare_handle, close_cb, NULL);
+  r = uv_prepare_init(&prepare_handle, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_prepare_start(&prepare_handle, prepare_cb);
+  r = uv_prepare_start(&prepare_handle, prepare_cb);
   ASSERT(r == 0);
 
-  r = oio_async_init(&async1_handle, async1_cb, close_cb, NULL);
+  r = uv_async_init(&async1_handle, async1_cb, close_cb, NULL);
   ASSERT(r == 0);
 
 #if 0
-  r = oio_async_init(&async2_handle, async2_cb, close_cb, NULL);
+  r = uv_async_init(&async2_handle, async2_cb, close_cb, NULL);
   ASSERT(r == 0);
 #endif
 
-  r = oio_run();
+  r = uv_run();
   ASSERT(r == 0);
 
-  r = oio_wait_thread(thread1_id);
+  r = uv_wait_thread(thread1_id);
   ASSERT(r == 0);
 #if 0
-  r = oio_wait_thread(thread2_id);
+  r = uv_wait_thread(thread2_id);
   ASSERT(r == 0);
-  r = oio_wait_thread(thread3_id);
+  r = uv_wait_thread(thread3_id);
   ASSERT(r == 0);
 #endif
 
@@ -19,7 +19,7 @@
  * IN THE SOFTWARE.
  */
 
-#include "../oio.h"
+#include "../uv.h"
 #include "task.h"
 #include <stdio.h>
 #include <stdlib.h>
@@ -28,7 +28,7 @@
 static int close_cb_called = 0;
 
 
-static void close_cb(oio_handle_t* handle, int status) {
+static void close_cb(uv_handle_t* handle, int status) {
   ASSERT(handle != NULL);
   ASSERT(status == 0);
 
@@ -36,41 +36,41 @@ static void close_cb(oio_handle_t* handle, int status) {
 }
 
 
-static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
-  oio_buf buf = {0, 0};
+static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
+  uv_buf buf = {0, 0};
   FATAL("alloc should not be called");
   return buf;
 }
 
 
 TEST_IMPL(bind_error_addrinuse) {
-  struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", TEST_PORT);
-  oio_handle_t server1, server2;
+  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
+  uv_handle_t server1, server2;
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_tcp_init(&server1, close_cb, NULL);
+  r = uv_tcp_init(&server1, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server1, (struct sockaddr*) &addr);
+  r = uv_bind(&server1, (struct sockaddr*) &addr);
   ASSERT(r == 0);
 
-  r = oio_tcp_init(&server2, close_cb, NULL);
+  r = uv_tcp_init(&server2, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server2, (struct sockaddr*) &addr);
+  r = uv_bind(&server2, (struct sockaddr*) &addr);
   ASSERT(r == 0);
 
-  r = oio_listen(&server1, 128, NULL);
+  r = uv_listen(&server1, 128, NULL);
   ASSERT(r == 0);
-  r = oio_listen(&server2, 128, NULL);
+  r = uv_listen(&server2, 128, NULL);
   ASSERT(r == -1);
 
-  ASSERT(oio_last_error().code == OIO_EADDRINUSE);
+  ASSERT(uv_last_error().code == UV_EADDRINUSE);
 
-  oio_close(&server1);
-  oio_close(&server2);
+  uv_close(&server1);
+  uv_close(&server2);
 
-  oio_run();
+  uv_run();
 
   ASSERT(close_cb_called == 2);
 
@@ -79,24 +79,24 @@ TEST_IMPL(bind_error_addrinuse) {
 
 
 TEST_IMPL(bind_error_addrnotavail_1) {
-  struct sockaddr_in addr = oio_ip4_addr("127.255.255.255", TEST_PORT);
-  oio_handle_t server;
+  struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
+  uv_handle_t server;
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_tcp_init(&server, close_cb, NULL);
+  r = uv_tcp_init(&server, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &addr);
+  r = uv_bind(&server, (struct sockaddr*) &addr);
 
   /* It seems that Linux is broken here - bind succeeds. */
   if (r == -1) {
-    ASSERT(oio_last_error().code == OIO_EADDRNOTAVAIL);
+    ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
   }
 
-  oio_close(&server);
+  uv_close(&server);
 
-  oio_run();
+  uv_run();
 
   ASSERT(close_cb_called == 1);
 
@@ -105,21 +105,21 @@ TEST_IMPL(bind_error_addrnotavail_1) {
 
 
 TEST_IMPL(bind_error_addrnotavail_2) {
-  struct sockaddr_in addr = oio_ip4_addr("4.4.4.4", TEST_PORT);
-  oio_handle_t server;
+  struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
+  uv_handle_t server;
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_tcp_init(&server, close_cb, NULL);
+  r = uv_tcp_init(&server, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &addr);
+  r = uv_bind(&server, (struct sockaddr*) &addr);
   ASSERT(r == -1);
-  ASSERT(oio_last_error().code == OIO_EADDRNOTAVAIL);
+  ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
 
-  oio_close(&server);
+  uv_close(&server);
 
-  oio_run();
+  uv_run();
 
   ASSERT(close_cb_called == 1);
 
@@ -129,49 +129,49 @@ TEST_IMPL(bind_error_addrnotavail_2) {
 
 TEST_IMPL(bind_error_fault) {
   char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
-  oio_handle_t server;
+  uv_handle_t server;
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_tcp_init(&server, close_cb, NULL);
+  r = uv_tcp_init(&server, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &garbage);
+  r = uv_bind(&server, (struct sockaddr*) &garbage);
   ASSERT(r == -1);
 
-  ASSERT(oio_last_error().code == OIO_EFAULT);
+  ASSERT(uv_last_error().code == UV_EFAULT);
 
-  oio_close(&server);
+  uv_close(&server);
 
-  oio_run();
+  uv_run();
 
   ASSERT(close_cb_called == 1);
 
   return 0;
 }
 
-/* Notes: On Linux oio_bind(server, NULL) will segfault the program. */
+/* Notes: On Linux uv_bind(server, NULL) will segfault the program. */
 
 TEST_IMPL(bind_error_inval) {
-  struct sockaddr_in addr1 = oio_ip4_addr("0.0.0.0", TEST_PORT);
-  struct sockaddr_in addr2 = oio_ip4_addr("0.0.0.0", TEST_PORT_2);
-  oio_handle_t server;
+  struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
+  struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
+  uv_handle_t server;
   int r;
 
-  oio_init(alloc_cb);
+  uv_init(alloc_cb);
 
-  r = oio_tcp_init(&server, close_cb, NULL);
+  r = uv_tcp_init(&server, close_cb, NULL);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &addr1);
+  r = uv_bind(&server, (struct sockaddr*) &addr1);
   ASSERT(r == 0);
-  r = oio_bind(&server, (struct sockaddr*) &addr2);
+  r = uv_bind(&server, (struct sockaddr*) &addr2);
   ASSERT(r == -1);
 
-  ASSERT(oio_last_error().code == OIO_EINVAL);
+  ASSERT(uv_last_error().code == UV_EINVAL);
 
-  oio_close(&server);
+  uv_close(&server);
 
-  oio_run();
+  uv_run();
 
   ASSERT(close_cb_called == 1);
 
@ -24,14 +24,14 @@
* stack.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"


static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";

static oio_handle_t client;
static oio_req_t connect_req, write_req, timeout_req, shutdown_req;
static uv_handle_t client;
static uv_req_t connect_req, write_req, timeout_req, shutdown_req;

static int nested = 0;
static int close_cb_called = 0;
@ -42,7 +42,7 @@ static int bytes_received = 0;
static int shutdown_cb_called = 0;


static void close_cb(oio_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "close_cb must be called from a fresh stack");

@ -50,7 +50,7 @@ static void close_cb(oio_handle_t* handle, int status) {
}


static void shutdown_cb(oio_req_t* req, int status) {
static void shutdown_cb(uv_req_t* req, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "shutdown_cb must be called from a fresh stack");

@ -58,22 +58,22 @@ static void shutdown_cb(oio_req_t* req, int status) {
}


static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack");

printf("Read. nread == %d\n", nread);
free(buf.base);

if (nread == 0) {
ASSERT(oio_last_error().code == OIO_EAGAIN);
ASSERT(uv_last_error().code == UV_EAGAIN);
return;

} else if (nread == -1) {
ASSERT(oio_last_error().code == OIO_EOF);
ASSERT(uv_last_error().code == UV_EOF);

nested++;
if (oio_close(handle)) {
FATAL("oio_close failed");
if (uv_close(handle)) {
FATAL("uv_close failed");
}
nested--;

@ -88,27 +88,27 @@ static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
/* from a fresh stack. */
if (bytes_received == sizeof MESSAGE) {
nested++;
oio_req_init(&shutdown_req, handle, shutdown_cb);
uv_req_init(&shutdown_req, handle, shutdown_cb);

puts("Shutdown");

if (oio_shutdown(&shutdown_req)) {
FATAL("oio_shutdown failed");
if (uv_shutdown(&shutdown_req)) {
FATAL("uv_shutdown failed");
}
nested--;
}
}


static void timeout_cb(oio_req_t* req, int64_t skew, int status) {
static void timeout_cb(uv_req_t* req, int64_t skew, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "timeout_cb must be called from a fresh stack");

puts("Timeout complete. Now read data...");

nested++;
if (oio_read_start(&client, read_cb)) {
FATAL("oio_read_start failed");
if (uv_read_start(&client, read_cb)) {
FATAL("uv_read_start failed");
}
nested--;

@ -116,7 +116,7 @@ static void timeout_cb(oio_req_t* req, int64_t skew, int status) {
}


static void write_cb(oio_req_t* req, int status) {
static void write_cb(uv_req_t* req, int status) {
ASSERT(status == 0);
ASSERT(nested == 0 && "write_cb must be called from a fresh stack");

@ -127,9 +127,9 @@ static void write_cb(oio_req_t* req, int status) {
/* back to our receive buffer when we start reading. This maximizes the */
/* temptation for the backend to use dirty stack for calling read_cb. */
nested++;
oio_req_init(&timeout_req, NULL, timeout_cb);
if (oio_timeout(&timeout_req, 500)) {
FATAL("oio_timeout failed");
uv_req_init(&timeout_req, NULL, timeout_cb);
if (uv_timeout(&timeout_req, 500)) {
FATAL("uv_timeout failed");
}
nested--;

@ -137,8 +137,8 @@ static void write_cb(oio_req_t* req, int status) {
}


static void connect_cb(oio_req_t* req, int status) {
oio_buf buf;
static void connect_cb(uv_req_t* req, int status) {
uv_buf buf;

puts("Connected. Write some data to echo server...");

@ -150,10 +150,10 @@ static void connect_cb(oio_req_t* req, int status) {
buf.base = (char*) &MESSAGE;
buf.len = sizeof MESSAGE;

oio_req_init(&write_req, req->handle, write_cb);
uv_req_init(&write_req, req->handle, write_cb);

if (oio_write(&write_req, &buf, 1)) {
FATAL("oio_write failed");
if (uv_write(&write_req, &buf, 1)) {
FATAL("uv_write failed");
}

nested--;
@ -162,8 +162,8 @@ static void connect_cb(oio_req_t* req, int status) {
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf;
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf;
buf.len = size;
buf.base = (char*) malloc(size);
ASSERT(buf.base);
@ -172,24 +172,24 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {


TEST_IMPL(callback_stack) {
struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);

oio_init(alloc_cb);
uv_init(alloc_cb);

if (oio_tcp_init(&client, &close_cb, NULL)) {
FATAL("oio_tcp_init failed");
if (uv_tcp_init(&client, &close_cb, NULL)) {
FATAL("uv_tcp_init failed");
}

puts("Connecting...");

nested++;
oio_req_init(&connect_req, &client, connect_cb);
if (oio_connect(&connect_req, (struct sockaddr*) &addr)) {
FATAL("oio_connect failed");
uv_req_init(&connect_req, &client, connect_cb);
if (uv_connect(&connect_req, (struct sockaddr*) &addr)) {
FATAL("uv_connect failed");
}
nested--;

oio_run();
uv_run();

ASSERT(nested == 0);
ASSERT(connect_cb_called == 1 && "connect_cb must be called exactly once");
@ -19,35 +19,35 @@
* IN THE SOFTWARE.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"

#include <stdlib.h>
#include <stdio.h>


static oio_handle_t handle;
static oio_req_t req;
static uv_handle_t handle;
static uv_req_t req;
static int connect_cb_calls;
static int close_cb_calls;


static void on_close(oio_handle_t* handle, int status) {
static void on_close(uv_handle_t* handle, int status) {
ASSERT(status == 0);
close_cb_calls++;
}


static void on_connect(oio_req_t *req, int status) {
static void on_connect(uv_req_t *req, int status) {
ASSERT(status == -1);
ASSERT(oio_last_error().code == OIO_ECONNREFUSED);
ASSERT(uv_last_error().code == UV_ECONNREFUSED);
connect_cb_calls++;
oio_close(req->handle);
uv_close(req->handle);
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf = {0, 0};
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
@ -57,26 +57,26 @@ TEST_IMPL(connection_fail) {
struct sockaddr_in client_addr, server_addr;
int r;

oio_init(alloc_cb);
uv_init(alloc_cb);

client_addr = oio_ip4_addr("0.0.0.0", 0);
client_addr = uv_ip4_addr("0.0.0.0", 0);

/* There should be no servers listening on this port. */
server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);

/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = oio_tcp_init(&handle, on_close, NULL);
r = uv_tcp_init(&handle, on_close, NULL);
ASSERT(!r);

/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
oio_req_init(&req, &handle, on_connect);
uv_req_init(&req, &handle, on_connect);

oio_bind(&handle, (struct sockaddr*)&client_addr);
r = oio_connect(&req, (struct sockaddr*)&server_addr);
uv_bind(&handle, (struct sockaddr*)&client_addr);
r = uv_connect(&req, (struct sockaddr*)&server_addr);
ASSERT(!r);

oio_run();
uv_run();

ASSERT(connect_cb_calls == 1);
ASSERT(close_cb_calls == 1);
@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
@ -33,7 +33,7 @@ static int close_cb_called = 0;
static int connect_cb_called = 0;


static void close_cb(oio_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);

@ -43,80 +43,80 @@ static void close_cb(oio_handle_t* handle, int status) {
}


static void do_accept(oio_req_t* req, int64_t skew, int status) {
oio_handle_t* server;
oio_handle_t* accepted_handle = (oio_handle_t*)malloc(sizeof *accepted_handle);
static void do_accept(uv_req_t* req, int64_t skew, int status) {
uv_handle_t* server;
uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle);
int r;

ASSERT(req != NULL);
ASSERT(status == 0);
ASSERT(accepted_handle != NULL);

server = (oio_handle_t*)req->data;
r = oio_accept(server, accepted_handle, close_cb, NULL);
server = (uv_handle_t*)req->data;
r = uv_accept(server, accepted_handle, close_cb, NULL);
ASSERT(r == 0);

do_accept_called++;

/* Immediately close the accepted handle. */
oio_close(accepted_handle);
uv_close(accepted_handle);

/* After accepting the two clients close the server handle */
if (do_accept_called == 2) {
oio_close(server);
uv_close(server);
}

free(req);
}


static void accept_cb(oio_handle_t* handle) {
oio_req_t* timeout_req = (oio_req_t*)malloc(sizeof *timeout_req);
static void accept_cb(uv_handle_t* handle) {
uv_req_t* timeout_req = (uv_req_t*)malloc(sizeof *timeout_req);

ASSERT(timeout_req != NULL);

/* Accept the client after 1 second */
oio_req_init(timeout_req, NULL, &do_accept);
uv_req_init(timeout_req, NULL, &do_accept);
timeout_req->data = (void*)handle;
oio_timeout(timeout_req, 1000);
uv_timeout(timeout_req, 1000);

accept_cb_called++;
}


static void start_server() {
struct sockaddr_in addr = oio_ip4_addr("0.0.0.0", TEST_PORT);
oio_handle_t* server = (oio_handle_t*)malloc(sizeof *server);
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server);
int r;

ASSERT(server != NULL);

r = oio_tcp_init(server, close_cb, NULL);
r = uv_tcp_init(server, close_cb, NULL);
ASSERT(r == 0);

r = oio_bind(server, (struct sockaddr*) &addr);
r = uv_bind(server, (struct sockaddr*) &addr);
ASSERT(r == 0);

r = oio_listen(server, 128, accept_cb);
r = uv_listen(server, 128, accept_cb);
ASSERT(r == 0);
}


static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
/* The server will not send anything, it should close gracefully. */
ASSERT(handle != NULL);
ASSERT(nread == -1);
ASSERT(oio_last_error().code == OIO_EOF);
ASSERT(uv_last_error().code == UV_EOF);

if (buf.base) {
free(buf.base);
}

oio_close(handle);
uv_close(handle);
}


static void connect_cb(oio_req_t* req, int status) {
static void connect_cb(uv_req_t* req, int status) {
int r;

ASSERT(req != NULL);
@ -124,7 +124,7 @@ static void connect_cb(oio_req_t* req, int status) {

/* Not that the server will send anything, but otherwise we'll never know */
/* when the server closes the connection. */
r = oio_read_start(req->handle, read_cb);
r = uv_read_start(req->handle, read_cb);
ASSERT(r == 0);

connect_cb_called++;
@ -134,25 +134,25 @@ static void connect_cb(oio_req_t* req, int status) {


static void client_connect() {
struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
oio_handle_t* client = (oio_handle_t*)malloc(sizeof *client);
oio_req_t* connect_req = (oio_req_t*)malloc(sizeof *connect_req);
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;

ASSERT(client != NULL);
ASSERT(connect_req != NULL);

r = oio_tcp_init(client, close_cb, NULL);
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);

oio_req_init(connect_req, client, connect_cb);
r = oio_connect(connect_req, (struct sockaddr*)&addr);
uv_req_init(connect_req, client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf;
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@ -161,14 +161,14 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {


TEST_IMPL(delayed_accept) {
oio_init(alloc_cb);
uv_init(alloc_cb);

start_server();

client_connect();
client_connect();

oio_run();
uv_run();

ASSERT(accept_cb_called == 2);
ASSERT(do_accept_called == 2);
@ -64,7 +64,7 @@
*/


#include "../oio.h"
#include "../uv.h"
#include "task.h"

#include <math.h>
@ -75,15 +75,15 @@
#define TIMEOUT 100


static oio_handle_t prepare_1_handle;
static oio_handle_t prepare_2_handle;
static uv_handle_t prepare_1_handle;
static uv_handle_t prepare_2_handle;

static oio_handle_t check_handle;
static uv_handle_t check_handle;

static oio_handle_t idle_1_handles[IDLE_COUNT];
static oio_handle_t idle_2_handle;
static uv_handle_t idle_1_handles[IDLE_COUNT];
static uv_handle_t idle_2_handle;

static oio_req_t timeout_req;
static uv_req_t timeout_req;


static int loop_iteration = 0;
@ -109,7 +109,7 @@ static int idle_2_is_active = 0;
static int timeout_cb_called = 0;


static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
static void timeout_cb(uv_req_t *req, int64_t skew, int status) {
int r;

ASSERT(req == &timeout_req);
@ -117,12 +117,12 @@ static void timeout_cb(oio_req_t *req, int64_t skew, int status) {

timeout_cb_called++;

r = oio_timeout(req, TIMEOUT);
r = uv_timeout(req, TIMEOUT);
ASSERT(r == 0);
}


static void idle_2_cb(oio_handle_t* handle, int status) {
static void idle_2_cb(uv_handle_t* handle, int status) {
int r;

LOG("IDLE_2_CB\n");
@ -132,12 +132,12 @@ static void idle_2_cb(oio_handle_t* handle, int status) {

idle_2_cb_called++;

r = oio_close(handle);
r = uv_close(handle);
ASSERT(r == 0);
}


static void idle_2_close_cb(oio_handle_t* handle, int status){
static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n");

ASSERT(handle == &idle_2_handle);
@ -150,7 +150,7 @@ static void idle_2_close_cb(oio_handle_t* handle, int status){
}


static void idle_1_cb(oio_handle_t* handle, int status) {
static void idle_1_cb(uv_handle_t* handle, int status) {
int r;

LOG("IDLE_1_CB\n");
@ -162,9 +162,9 @@ static void idle_1_cb(oio_handle_t* handle, int status) {

/* Init idle_2 and make it active */
if (!idle_2_is_active) {
r = oio_idle_init(&idle_2_handle, idle_2_close_cb, NULL);
r = uv_idle_init(&idle_2_handle, idle_2_close_cb, NULL);
ASSERT(r == 0);
r = oio_idle_start(&idle_2_handle, idle_2_cb);
r = uv_idle_start(&idle_2_handle, idle_2_cb);
ASSERT(r == 0);
idle_2_is_active = 1;
idle_2_cb_started++;
@ -173,14 +173,14 @@ static void idle_1_cb(oio_handle_t* handle, int status) {
idle_1_cb_called++;

if (idle_1_cb_called % 5 == 0) {
r = oio_idle_stop(handle);
r = uv_idle_stop(handle);
ASSERT(r == 0);
idles_1_active--;
}
}


static void idle_1_close_cb(oio_handle_t* handle, int status){
static void idle_1_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_1_CLOSE_CB\n");

ASSERT(handle != NULL);
@ -190,7 +190,7 @@ static void idle_1_close_cb(oio_handle_t* handle, int status){
}


static void check_cb(oio_handle_t* handle, int status) {
static void check_cb(uv_handle_t* handle, int status) {
int i, r;

LOG("CHECK_CB\n");
@ -206,29 +206,29 @@ static void check_cb(oio_handle_t* handle, int status) {
if (loop_iteration < ITERATIONS) {
/* Make some idle watchers active */
for (i = 0; i < 1 + (loop_iteration % IDLE_COUNT); i++) {
r = oio_idle_start(&idle_1_handles[i], idle_1_cb);
r = uv_idle_start(&idle_1_handles[i], idle_1_cb);
ASSERT(r == 0);
idles_1_active++;
}

} else {
/* End of the test - close all handles */
r = oio_close(&prepare_1_handle);
r = uv_close(&prepare_1_handle);
ASSERT(r == 0);
r = oio_close(&check_handle);
r = uv_close(&check_handle);
ASSERT(r == 0);
r = oio_close(&prepare_2_handle);
r = uv_close(&prepare_2_handle);
ASSERT(r == 0);

for (i = 0; i < IDLE_COUNT; i++) {
r = oio_close(&idle_1_handles[i]);
r = uv_close(&idle_1_handles[i]);
ASSERT(r == 0);
}

/* This handle is closed/recreated every time, close it only if it is */
/* active. */
if (idle_2_is_active) {
r = oio_close(&idle_2_handle);
r = uv_close(&idle_2_handle);
ASSERT(r == 0);
}
}
@ -237,7 +237,7 @@ static void check_cb(oio_handle_t* handle, int status) {
}


static void check_close_cb(oio_handle_t* handle, int status){
static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == &check_handle);
ASSERT(status == 0);
@ -246,7 +246,7 @@ static void check_close_cb(oio_handle_t* handle, int status){
}


static void prepare_2_cb(oio_handle_t* handle, int status) {
static void prepare_2_cb(uv_handle_t* handle, int status) {
int r;

LOG("PREPARE_2_CB\n");
@ -263,14 +263,14 @@ static void prepare_2_cb(oio_handle_t* handle, int status) {
/* (loop_iteration % 2 == 0) cannot be true. */
ASSERT(loop_iteration % 2 != 0);

r = oio_prepare_stop(handle);
r = uv_prepare_stop(handle);
ASSERT(r == 0);

prepare_2_cb_called++;
}


static void prepare_2_close_cb(oio_handle_t* handle, int status){
static void prepare_2_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == &prepare_2_handle);
ASSERT(status == 0);
@ -279,7 +279,7 @@ static void prepare_2_close_cb(oio_handle_t* handle, int status){
}


static void prepare_1_cb(oio_handle_t* handle, int status) {
static void prepare_1_cb(uv_handle_t* handle, int status) {
int r;

LOG("PREPARE_1_CB\n");
@ -293,7 +293,7 @@ static void prepare_1_cb(oio_handle_t* handle, int status) {
*/

if (loop_iteration % 2 == 0) {
r = oio_prepare_start(&prepare_2_handle, prepare_2_cb);
r = uv_prepare_start(&prepare_2_handle, prepare_2_cb);
ASSERT(r == 0);
}

@ -304,7 +304,7 @@ static void prepare_1_cb(oio_handle_t* handle, int status) {
}


static void prepare_1_close_cb(oio_handle_t* handle, int status){
static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == &prepare_1_handle);
ASSERT(status == 0);
@ -313,8 +313,8 @@ static void prepare_1_close_cb(oio_handle_t* handle, int status){
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf rv = { 0, 0 };
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf rv = { 0, 0 };
FATAL("alloc_cb should never be called in this test");
return rv;
}
@ -324,25 +324,25 @@ TEST_IMPL(loop_handles) {
int i;
int r;

oio_init(alloc_cb);
uv_init(alloc_cb);

r = oio_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
ASSERT(r == 0);
r = oio_prepare_start(&prepare_1_handle, prepare_1_cb);
r = uv_prepare_start(&prepare_1_handle, prepare_1_cb);
ASSERT(r == 0);

r = oio_check_init(&check_handle, check_close_cb, NULL);
r = uv_check_init(&check_handle, check_close_cb, NULL);
ASSERT(r == 0);
r = oio_check_start(&check_handle, check_cb);
r = uv_check_start(&check_handle, check_cb);
ASSERT(r == 0);

/* initialize only, prepare_2 is started by prepare_1_cb */
r = oio_prepare_init(&prepare_2_handle, prepare_2_close_cb, NULL);
r = uv_prepare_init(&prepare_2_handle, prepare_2_close_cb, NULL);
ASSERT(r == 0);

for (i = 0; i < IDLE_COUNT; i++) {
/* initialize only, idle_1 handles are started by check_cb */
r = oio_idle_init(&idle_1_handles[i], idle_1_close_cb, NULL);
r = uv_idle_init(&idle_1_handles[i], idle_1_close_cb, NULL);
ASSERT(r == 0);
}

@ -350,12 +350,12 @@ TEST_IMPL(loop_handles) {

/* the timer callback is there to keep the event loop polling */
/* unref it as it is not supposed to keep the loop alive */
oio_req_init(&timeout_req, NULL, timeout_cb);
r = oio_timeout(&timeout_req, TIMEOUT);
uv_req_init(&timeout_req, NULL, timeout_cb);
r = uv_timeout(&timeout_req, TIMEOUT);
ASSERT(r == 0);
oio_unref();
uv_unref();

r = oio_run();
r = uv_run();
ASSERT(r == 0);

ASSERT(loop_iteration == ITERATIONS);
@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"

#include <stdlib.h>
@ -39,16 +39,16 @@ static char PING[] = "PING\n";
typedef struct {
int pongs;
int state;
oio_handle_t handle;
oio_req_t connect_req;
oio_req_t read_req;
uv_handle_t handle;
uv_req_t connect_req;
uv_req_t read_req;
char read_buffer[BUFSIZE];
} pinger_t;

void pinger_try_read(pinger_t* pinger);


static void pinger_on_close(oio_handle_t* handle, int status) {
static void pinger_on_close(uv_handle_t* handle, int status) {
pinger_t* pinger = (pinger_t*)handle->data;

ASSERT(status == 0);
@ -60,7 +60,7 @@ static void pinger_on_close(oio_handle_t* handle, int status) {
}


static void pinger_after_write(oio_req_t *req, int status) {
static void pinger_after_write(uv_req_t *req, int status) {
ASSERT(status == 0);

free(req);
@ -68,31 +68,31 @@ static void pinger_after_write(oio_req_t *req, int status) {


static void pinger_write_ping(pinger_t* pinger) {
oio_req_t *req;
oio_buf buf;
uv_req_t *req;
uv_buf buf;

buf.base = (char*)&PING;
buf.len = strlen(PING);

req = (oio_req_t*)malloc(sizeof(*req));
oio_req_init(req, &pinger->handle, pinger_after_write);
req = (uv_req_t*)malloc(sizeof(*req));
uv_req_init(req, &pinger->handle, pinger_after_write);

if (oio_write(req, &buf, 1)) {
FATAL("oio_write failed");
if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed");
}

puts("PING");
}


static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
unsigned int i;
pinger_t* pinger;

pinger = (pinger_t*)handle->data;

if (nread < 0) {
ASSERT(oio_last_error().code == OIO_EOF);
ASSERT(uv_last_error().code == UV_EOF);

puts("got EOF");

@ -100,7 +100,7 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
free(buf.base);
}

oio_close(&pinger->handle);
uv_close(&pinger->handle);

return;
}
@ -115,7 +115,7 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
oio_close(&pinger->handle);
uv_close(&pinger->handle);
return;
}
}
@ -123,20 +123,20 @@ static void pinger_read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
}


static void pinger_on_connect(oio_req_t *req, int status) {
static void pinger_on_connect(uv_req_t *req, int status) {
pinger_t *pinger = (pinger_t*)req->handle->data;

ASSERT(status == 0);

pinger_write_ping(pinger);

oio_read_start(req->handle, pinger_read_cb);
uv_read_start(req->handle, pinger_read_cb);
}


static void pinger_new() {
int r;
struct sockaddr_in server_addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
pinger_t *pinger;

pinger = (pinger_t*)malloc(sizeof(*pinger));
@ -144,20 +144,20 @@ static void pinger_new() {
pinger->pongs = 0;

/* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = oio_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger);
ASSERT(!r);

/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
oio_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);
uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect);

r = oio_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr);
ASSERT(!r);
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf;
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@ -165,10 +165,10 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {


TEST_IMPL(ping_pong) {
oio_init(alloc_cb);
uv_init(alloc_cb);

pinger_new();
oio_run();
uv_run();

ASSERT(completed_pingers == 1);
@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
@ -45,7 +45,7 @@ static int bytes_received = 0;
static int bytes_received_done = 0;


static void close_cb(oio_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL);
ASSERT(status == 0);

@ -55,7 +55,7 @@ static void close_cb(oio_handle_t* handle, int status) {
}


static void shutdown_cb(oio_req_t* req, int status) {
static void shutdown_cb(uv_req_t* req, int status) {
ASSERT(req);
ASSERT(status == 0);

@ -72,18 +72,18 @@ static void shutdown_cb(oio_req_t* req, int status) {
}


static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
static void read_cb(uv_handle_t* handle, int nread, uv_buf buf) {
ASSERT(handle != NULL);

if (nread < 0) {
ASSERT(oio_last_error().code == OIO_EOF);
ASSERT(uv_last_error().code == UV_EOF);
printf("GOT EOF\n");

if (buf.base) {
free(buf.base);
}

oio_close(handle);
uv_close(handle);
return;
}

@ -93,12 +93,12 @@ static void read_cb(oio_handle_t* handle, int nread, oio_buf buf) {
}


static void write_cb(oio_req_t* req, int status) {
static void write_cb(uv_req_t* req, int status) {
ASSERT(req != NULL);

if (status) {
oio_err_t err = oio_last_error();
fprintf(stderr, "oio_write error: %s\n", oio_strerror(err));
uv_err_t err = uv_last_error();
fprintf(stderr, "uv_write error: %s\n", uv_strerror(err));
ASSERT(0);
}

@ -109,9 +109,9 @@ static void write_cb(oio_req_t* req, int status) {
}


static void connect_cb(oio_req_t* req, int status) {
oio_buf send_bufs[CHUNKS_PER_WRITE];
oio_handle_t* handle;
static void connect_cb(uv_req_t* req, int status) {
uv_buf send_bufs[CHUNKS_PER_WRITE];
uv_handle_t* handle;
int i, j, r;

ASSERT(req != NULL);
@ -130,33 +130,33 @@ static void connect_cb(oio_req_t* req, int status) {
bytes_sent += CHUNK_SIZE;
}

req = (oio_req_t*)malloc(sizeof *req);
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);

oio_req_init(req, handle, write_cb);
r = oio_write(req, (oio_buf*)&send_bufs, CHUNKS_PER_WRITE);
uv_req_init(req, handle, write_cb);
r = uv_write(req, (uv_buf*)&send_bufs, CHUNKS_PER_WRITE);
ASSERT(r == 0);
}

/* Shutdown on drain. FIXME: dealloc req? */
req = (oio_req_t*) malloc(sizeof(oio_req_t));
req = (uv_req_t*) malloc(sizeof(uv_req_t));
ASSERT(req != NULL);
oio_req_init(req, handle, shutdown_cb);
r = oio_shutdown(req);
uv_req_init(req, handle, shutdown_cb);
r = uv_shutdown(req);
ASSERT(r == 0);

/* Start reading */
req = (oio_req_t*)malloc(sizeof *req);
req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL);

oio_req_init(req, handle, read_cb);
r = oio_read_start(handle, read_cb);
uv_req_init(req, handle, read_cb);
r = uv_read_start(handle, read_cb);
ASSERT(r == 0);
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf;
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
@ -164,9 +164,9 @@ static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {


TEST_IMPL(tcp_writealot) {
struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);
oio_handle_t* client = (oio_handle_t*)malloc(sizeof *client);
oio_req_t* connect_req = (oio_req_t*)malloc(sizeof *connect_req);
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r;

ASSERT(client != NULL);
@ -176,16 +176,16 @@ TEST_IMPL(tcp_writealot) {

ASSERT(send_buffer != NULL);

oio_init(alloc_cb);
uv_init(alloc_cb);

r = oio_tcp_init(client, close_cb, NULL);
r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0);

oio_req_init(connect_req, client, connect_cb);
r = oio_connect(connect_req, (struct sockaddr*)&addr);
uv_req_init(connect_req, client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr);
ASSERT(r == 0);

oio_run();
uv_run();

ASSERT(shutdown_cb_called == 1);
ASSERT(connect_cb_called == 1);
@ -19,7 +19,7 @@
* IN THE SOFTWARE.
*/

#include "../oio.h"
#include "../uv.h"
#include "task.h"


@ -28,7 +28,7 @@ static int timeouts = 0;

static int64_t start_time;

static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
static void timeout_cb(uv_req_t *req, int64_t skew, int status) {
ASSERT(req != NULL);
ASSERT(status == 0);

@ -36,11 +36,11 @@ static void timeout_cb(oio_req_t *req, int64_t skew, int status) {
timeouts++;

/* Just call this randomly for the code coverage. */
oio_update_time();
uv_update_time();
}

static void exit_timeout_cb(oio_req_t *req, int64_t skew, int status) {
int64_t now = oio_now();
static void exit_timeout_cb(uv_req_t *req, int64_t skew, int status) {
int64_t now = uv_now();
ASSERT(req != NULL);
ASSERT(status == 0);
ASSERT(timeouts == expected);
@ -48,57 +48,57 @@ static void exit_timeout_cb(oio_req_t *req, int64_t skew, int status) {
exit(0);
}

static void dummy_timeout_cb(oio_req_t *req, int64_t skew, int status) {
static void dummy_timeout_cb(uv_req_t *req, int64_t skew, int status) {
/* Should never be called */
FATAL("dummy_timer_cb should never be called");
}


static oio_buf alloc_cb(oio_handle_t* handle, size_t size) {
oio_buf buf = {0, 0};
static uv_buf alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}


TEST_IMPL(timeout) {
oio_req_t *req;
oio_req_t exit_req;
oio_req_t dummy_req;
uv_req_t *req;
uv_req_t exit_req;
uv_req_t dummy_req;
int i;

oio_init(alloc_cb);
uv_init(alloc_cb);

start_time = oio_now();
start_time = uv_now();
ASSERT(0 < start_time);

/* Let 10 timers time out in 500 ms total. */
for (i = 0; i < 10; i++) {
req = (oio_req_t*)malloc(sizeof(*req));
req = (uv_req_t*)malloc(sizeof(*req));
ASSERT(req != NULL);

oio_req_init(req, NULL, timeout_cb);
uv_req_init(req, NULL, timeout_cb);

if (oio_timeout(req, i * 50) < 0) {
FATAL("oio_timeout failed");
if (uv_timeout(req, i * 50) < 0) {
FATAL("uv_timeout failed");
}

expected++;
}

/* The 11th timer exits the test and runs after 1 s. */
oio_req_init(&exit_req, NULL, exit_timeout_cb);
if (oio_timeout(&exit_req, 1000) < 0) {
FATAL("oio_timeout failed");
uv_req_init(&exit_req, NULL, exit_timeout_cb);
if (uv_timeout(&exit_req, 1000) < 0) {
FATAL("uv_timeout failed");
}

/* The 12th timer should never run. */
oio_req_init(&dummy_req, NULL, dummy_timeout_cb);
if (oio_timeout(&dummy_req, 2000)) {
FATAL("oio_timeout failed");
uv_req_init(&dummy_req, NULL, dummy_timeout_cb);
if (uv_timeout(&dummy_req, 2000)) {
FATAL("uv_timeout failed");
}

oio_run();
uv_run();

FATAL("should never get here");
return 2;
6
tree.h
6
tree.h
@ -23,8 +23,8 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef _OIO_TREE_H_
#define _OIO_TREE_H_
#ifndef _UV_TREE_H_
#define _UV_TREE_H_

#define __unused

@ -759,4 +759,4 @@ name##_RB_MINMAX(struct name *head, int val) \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))

#endif /* _OIO_TREE_H_ */
#endif /* _UV_TREE_H_ */
File diff suppressed because it is too large
@ -19,8 +19,8 @@
* IN THE SOFTWARE.
*/

#ifndef OIO_UNIX_H
#define OIO_UNIX_H
#ifndef UV_UNIX_H
#define UV_UNIX_H

#include "ngx-queue.h"

@ -35,44 +35,44 @@
typedef struct {
char* base;
size_t len;
} oio_buf;
} uv_buf;


#define oio_req_private_fields \
#define uv_req_private_fields \
int write_index; \
ev_timer timer; \
ngx_queue_t queue; \
oio_buf* bufs; \
uv_buf* bufs; \
int bufcnt;


/* TODO: union or classes please! */
#define oio_handle_private_fields \
#define uv_handle_private_fields \
int fd; \
int flags; \
ev_idle next_watcher; \
/* OIO_TCP */ \
/* UV_TCP */ \
int delayed_error; \
oio_read_cb read_cb; \
oio_accept_cb accept_cb; \
uv_read_cb read_cb; \
uv_accept_cb accept_cb; \
int accepted_fd; \
oio_req_t *connect_req; \
oio_req_t *shutdown_req; \
uv_req_t *connect_req; \
uv_req_t *shutdown_req; \
ev_io read_watcher; \
ev_io write_watcher; \
ngx_queue_t write_queue; \
/* OIO_PREPARE */ \
/* UV_PREPARE */ \
ev_prepare prepare_watcher; \
oio_loop_cb prepare_cb; \
/* OIO_CHECK */ \
uv_loop_cb prepare_cb; \
/* UV_CHECK */ \
ev_check check_watcher; \
oio_loop_cb check_cb; \
/* OIO_IDLE */ \
uv_loop_cb check_cb; \
/* UV_IDLE */ \
ev_idle idle_watcher; \
oio_loop_cb idle_cb; \
/* OIO_ASYNC */ \
uv_loop_cb idle_cb; \
/* UV_ASYNC */ \
ev_async async_watcher; \
oio_loop_cb async_cb;
uv_loop_cb async_cb;


#endif /* OIO_UNIX_H */
#endif /* UV_UNIX_H */
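These per-platform headers work by macro splicing: uv.h (added below) ends each public struct with uv_req_private_fields / uv_handle_private_fields, and whichever platform header was included supplies those members. An illustrative expansion on Unix, written as an editorial sketch rather than anything in the commit:

    /* After preprocessing on Unix, struct uv_req_s from uv.h effectively reads: */
    struct uv_req_s {
      uv_req_type type;     /* public part, declared in uv.h below */
      uv_handle_t* handle;
      void* cb;
      void* data;
      int write_index;      /* spliced in from uv_req_private_fields above */
      ev_timer timer;
      ngx_queue_t queue;
      uv_buf* bufs;
      int bufcnt;
    };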
File diff suppressed because it is too large
@ -33,15 +33,15 @@


/**
* It should be possible to cast oio_buf[] to WSABUF[]
* It should be possible to cast uv_buf[] to WSABUF[]
* see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
*/
typedef struct oio_buf {
typedef struct uv_buf {
ULONG len;
char* base;
} oio_buf;
} uv_buf;
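The cast noted above is sound because WSABUF is declared as struct { ULONG len; CHAR* buf; }, the same size and member order as this uv_buf. A compile-time guard one could add (editorial sketch, not part of the commit):

    /* Fails to compile (negative array size) if uv_buf and WSABUF ever diverge. */
    typedef int uv_buf_size_check[sizeof(uv_buf) == sizeof(WSABUF) ? 1 : -1];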

#define oio_req_private_fields \
#define uv_req_private_fields \
union { \
/* Used by I/O operations */ \
struct { \
@ -50,51 +50,51 @@ typedef struct oio_buf {
}; \
/* Used by timers */ \
struct { \
RB_ENTRY(oio_req_s) tree_entry; \
RB_ENTRY(uv_req_s) tree_entry; \
int64_t due; \
}; \
}; \
int flags;

#define oio_tcp_connection_fields \
#define uv_tcp_connection_fields \
void* read_cb; \
struct oio_req_s read_req; \
struct uv_req_s read_req; \
unsigned int write_reqs_pending; \
oio_req_t* shutdown_req;
uv_req_t* shutdown_req;

#define oio_tcp_server_fields \
#define uv_tcp_server_fields \
void *accept_cb; \
SOCKET accept_socket; \
struct oio_req_s accept_req; \
struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];

#define oio_tcp_fields \
#define uv_tcp_fields \
unsigned int reqs_pending; \
union { \
SOCKET socket; \
HANDLE handle; \
}; \
union { \
struct { oio_tcp_connection_fields }; \
struct { oio_tcp_server_fields }; \
struct { uv_tcp_connection_fields }; \
struct { uv_tcp_server_fields }; \
};

#define oio_loop_fields \
oio_handle_t* loop_prev; \
oio_handle_t* loop_next; \
#define uv_loop_fields \
uv_handle_t* loop_prev; \
uv_handle_t* loop_next; \
void* loop_cb;

#define oio_async_fields \
struct oio_req_s async_req; \
#define uv_async_fields \
struct uv_req_s async_req; \
/* char to avoid alignment issues */ \
char volatile async_sent;

#define oio_handle_private_fields \
oio_handle_t* endgame_next; \
#define uv_handle_private_fields \
uv_handle_t* endgame_next; \
unsigned int flags; \
oio_err_t error; \
uv_err_t error; \
union { \
struct { oio_tcp_fields }; \
struct { oio_loop_fields }; \
struct { oio_async_fields }; \
struct { uv_tcp_fields }; \
struct { uv_loop_fields }; \
struct { uv_async_fields }; \
};
271
uv.h
Normal file
271
uv.h
Normal file
@ -0,0 +1,271 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#ifndef UV_H
#define UV_H

#define UV_VERSION_MAJOR 0
#define UV_VERSION_MINOR 1

#include <stdint.h> /* int64_t */
#include <sys/types.h> /* size_t */

typedef struct uv_err_s uv_err_t;
typedef struct uv_handle_s uv_handle_t;
typedef struct uv_req_s uv_req_t;


#if defined(__unix__) || defined(__POSIX__) || defined(__APPLE__)
# include "uv-unix.h"
#else
# include "uv-win.h"
#endif


/* The status parameter is 0 if the request completed successfully,
* and should be -1 if the request was cancelled or failed.
* For uv_close_cb, -1 means that the handle was closed due to an error.
* Error details can be obtained by calling uv_last_error().
*
* In the case of uv_read_cb the uv_buf returned should be freed by the
* user.
*/
typedef uv_buf (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size);
typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf buf);
typedef void (*uv_write_cb)(uv_req_t* req, int status);
typedef void (*uv_connect_cb)(uv_req_t* req, int status);
typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
typedef void (*uv_accept_cb)(uv_handle_t* handle);
typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
typedef void (*uv_timer_cb)(uv_req_t* req, int64_t skew, int status);
/* TODO: do loop_cb and async_cb really need a status argument? */
typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
typedef void (*uv_async_cb)(uv_handle_t* handle, int stats);


/* Expand this list if necessary. */
typedef enum {
UV_UNKNOWN = -1,
UV_OK = 0,
UV_EOF,
UV_EACCESS,
UV_EAGAIN,
UV_EADDRINUSE,
UV_EADDRNOTAVAIL,
UV_EAFNOSUPPORT,
UV_EALREADY,
UV_EBADF,
UV_EBUSY,
UV_ECONNABORTED,
UV_ECONNREFUSED,
UV_ECONNRESET,
UV_EDESTADDRREQ,
UV_EFAULT,
UV_EHOSTUNREACH,
UV_EINTR,
UV_EINVAL,
UV_EISCONN,
UV_EMFILE,
UV_ENETDOWN,
UV_ENETUNREACH,
UV_ENFILE,
UV_ENOBUFS,
UV_ENOMEM,
UV_ENONET,
UV_ENOPROTOOPT,
UV_ENOTCONN,
UV_ENOTSOCK,
UV_ENOTSUP,
UV_EPROTO,
UV_EPROTONOSUPPORT,
UV_EPROTOTYPE,
UV_ETIMEDOUT
} uv_err_code;

typedef enum {
UV_UNKNOWN_HANDLE = 0,
UV_TCP,
UV_NAMED_PIPE,
UV_TTY,
UV_FILE,
UV_PREPARE,
UV_CHECK,
UV_IDLE,
UV_ASYNC
} uv_handle_type;

typedef enum {
UV_UNKNOWN_REQ = 0,
UV_CONNECT,
UV_ACCEPT,
UV_READ,
UV_WRITE,
UV_SHUTDOWN,
UV_TIMEOUT,
UV_WAKEUP
} uv_req_type;


struct uv_err_s {
/* read-only */
uv_err_code code;
/* private */
int sys_errno_;
};


struct uv_req_s {
/* read-only */
uv_req_type type;
/* public */
uv_handle_t* handle;
void* cb;
void* data;
/* private */
uv_req_private_fields
};


struct uv_handle_s {
/* read-only */
uv_handle_type type;
/* public */
uv_close_cb close_cb;
void* data;
/* number of bytes queued for writing */
size_t write_queue_size;
/* private */
uv_handle_private_fields
};


/* Most functions return boolean: 0 for success and -1 for failure.
* On error the user should then call uv_last_error() to determine
* the error code.
*/
uv_err_t uv_last_error();
char* uv_strerror(uv_err_t err);
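Given that 0 / -1 convention, error handling looks the same everywhere; a minimal caller-side sketch against this header (the function name is illustrative, not from the commit):

    #include <stdio.h>

    /* Print why the most recent uv_* call failed. */
    static void report_last_error(const char* what) {
      uv_err_t err = uv_last_error();
      fprintf(stderr, "%s: %s\n", what, uv_strerror(err));
    }

    /* usage: if (uv_bind(&server, (struct sockaddr*)&addr)) report_last_error("uv_bind"); */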


void uv_init(uv_alloc_cb alloc);
int uv_run();

/* Manually modify the event loop's reference count. Useful if the user wants
* to have a handle or timeout that doesn't keep the loop alive.
*/
void uv_ref();
void uv_unref();
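test-loop-handles.c earlier in this commit uses exactly this: it starts a repeating timeout to keep the loop polling, then calls uv_unref() so that the pending timeout alone cannot keep uv_run() from returning. A condensed sketch (timer_cb is assumed defined elsewhere):

    static uv_req_t poll_req;   /* hypothetical name */

    static void start_polling_timer(void) {
      uv_req_init(&poll_req, NULL, timer_cb);   /* timer_cb: a uv_timer_cb */
      if (uv_timeout(&poll_req, 100) == 0) {
        uv_unref();   /* loop may now exit even while this timeout is pending */
      }
    }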

void uv_update_time();
int64_t uv_now();

void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);

/*
* TODO:
* - uv_(pipe|pipe_tty)_handle_init
* - uv_bind_pipe(char* name)
* - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb)
* - A way to list cancelled uv_reqs after before/on uv_close_cb
*/

/* TCP socket methods.
* Handle and callback must be set by calling uv_req_init.
*/
int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_bind(uv_handle_t* handle, struct sockaddr* addr);

int uv_connect(uv_req_t* req, struct sockaddr* addr);
int uv_shutdown(uv_req_t* req);
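The client-side flow, as exercised by the tests earlier in this commit: initialize the handle, point a request at it, then connect. A condensed sketch (the callbacks and the port number are illustrative):

    static uv_handle_t client;
    static uv_req_t connect_req;

    static void start_client(void) {
      struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 8000); /* port illustrative */
      uv_tcp_init(&client, close_cb, NULL);             /* close_cb: a uv_close_cb */
      uv_req_init(&connect_req, &client, connect_cb);   /* connect_cb: a uv_connect_cb */
      uv_connect(&connect_req, (struct sockaddr*)&addr);
    }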

/* TCP server methods. */
int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb);

/* Call this after accept_cb. client does not need to be initialized. */
int uv_accept(uv_handle_t* server, uv_handle_t* client,
uv_close_cb close_cb, void* data);
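Server side, mirroring test-delayed-accept.c above: uv_accept is called from (or after) the accept_cb, and the client handle needs no prior initialization. A condensed sketch (close_cb and read_cb assumed defined; malloc from <stdlib.h>):

    static void on_accept(uv_handle_t* server) {   /* a uv_accept_cb */
      uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client);
      if (uv_accept(server, client, close_cb, NULL) == 0) {
        uv_read_start(client, read_cb);
      }
    }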


/* Read data from an incoming stream. The callback will be made several
* times until there is no more data to read or uv_read_stop is
* called. When we've reached EOF nread will be set to -1 and the error is
* set to UV_EOF. When nread == -1 the buf parameter might not point to a
* valid buffer; in that case buf.len and buf.base are both set to 0.
* Note that nread might also be 0, which does *not* indicate an error or
* eof; it happens when libuv requested a buffer through the alloc callback
* but then decided that it didn't need that buffer.
*/
int uv_read_start(uv_handle_t* handle, uv_read_cb cb);
int uv_read_stop(uv_handle_t* handle);

int uv_write(uv_req_t* req, uv_buf bufs[], int bufcnt);
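A read callback honoring the nread contract above, next to a single-buffer write, condensed from the tests in this commit (names illustrative; free from <stdlib.h>):

    static void on_read(uv_handle_t* handle, int nread, uv_buf buf) {
      if (nread == -1) {                /* EOF: uv_last_error().code == UV_EOF */
        if (buf.base) free(buf.base);   /* buf may be {0, 0} at EOF */
        uv_close(handle);
        return;
      }
      if (nread > 0) {
        /* ...consume nread bytes from buf.base here... */
      }
      free(buf.base);   /* the user owns the buffer, even when nread == 0 */
    }

    static void send_one(uv_handle_t* handle, char* data, size_t len) {
      static uv_req_t write_req;   /* sketch assumes one write in flight */
      uv_buf buf;
      buf.base = data;
      buf.len = len;
      uv_req_init(&write_req, handle, write_cb);   /* write_cb: a uv_write_cb */
      uv_write(&write_req, &buf, 1);
    }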

/* Timer methods */
int uv_timeout(uv_req_t* req, int64_t timeout);
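Timers reuse the request machinery rather than a handle; test-timeout.c above is the full version, and a minimal form looks like this (names illustrative; skew and status per the uv_timer_cb signature):

    static uv_req_t timer_req;

    static void on_timeout(uv_req_t* req, int64_t skew, int status) {
      /* status is 0 on completion, -1 if the request was cancelled */
    }

    static void start_timer(void) {
      uv_req_init(&timer_req, NULL, on_timeout);
      uv_timeout(&timer_req, 250);   /* fires once, after 250 ms */
    }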

/* libev wrapper. Every active prepare handle gets its callback called
* exactly once per loop iteration, just before the system blocks to wait
* for completed i/o.
*/
int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_prepare_stop(uv_handle_t* handle);

/* libev wrapper. Every active check handle gets its callback called exactly
* once per loop iteration, just after the system returns from blocking.
*/
int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_check_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_check_stop(uv_handle_t* handle);

/* libev wrapper. Every active idle handle gets its callback called
* repeatedly until it is stopped. This happens after all other types of
* callbacks are processed. When there are multiple "idle" handles active,
* their callbacks are called in turn.
*/
int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_idle_stop(uv_handle_t* handle);
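All three libev wrappers share the same init/start/stop shape; an idle handle, for instance, in a condensed sketch (close_cb assumed defined):

    static uv_handle_t idler;

    static void on_idle(uv_handle_t* handle, int status) {   /* a uv_loop_cb */
      /* do one slice of deferred work, then stop */
      uv_idle_stop(handle);
    }

    static void start_idler(void) {
      uv_idle_init(&idler, close_cb, NULL);
      uv_idle_start(&idler, on_idle);
    }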

/* libev wrapper. uv_async_send wakes up the event loop and calls the async
* handle's callback. There is no guarantee that every uv_async_send call
* leads to exactly one invocation of the callback; the only guarantee is
* that the callback function is called at least once after the call to
* async_send. Unlike everything else, uv_async_send can be called from
* another thread.
*
* QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my
* side.
*/
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_send(uv_handle_t* handle);
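This is the one thread-safe entry point; a sketch of handing work back to the loop thread (the pthread use is illustrative only, and on_async / close_cb are assumed defined):

    #include <pthread.h>

    static uv_handle_t async_handle;

    static void* worker(void* arg) {
      /* ... compute something off the loop thread ... */
      uv_async_send(&async_handle);   /* async callback runs at least once on the loop */
      return NULL;
    }

    static void start_worker(void) {
      pthread_t tid;
      uv_async_init(&async_handle, on_async, close_cb, NULL);
      pthread_create(&tid, NULL, worker, NULL);
    }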

/* Request handle to be closed. close_cb will be called
* asynchronously after this call.
*/
int uv_close(uv_handle_t* handle);


/* Utility */
struct sockaddr_in uv_ip4_addr(char* ip, int port);

#endif /* UV_H */