Merge remote-tracking branch 'origin/v0.10'

Conflicts:
    ChangeLog
    src/unix/stream.c
    src/version.c
commit 8ef9592a95

ChangeLog (36 lines changed)
@@ -1,3 +1,39 @@
+2013.05.29, Version 0.10.9 (Stable), a195f9ace23d92345baf57582678bfc3017e6632
+
+Changes since version 0.10.8:
+
+* unix: fix stream refcounting buglet (Ben Noordhuis)
+
+* unix: remove erroneous asserts (Ben Noordhuis)
+
+* unix: add uv__is_closing() macro (Ben Noordhuis)
+
+* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis)
+
+
+2013.05.25, Version 0.10.8 (Stable), 0f39be12926fe2d8766a9f025797a473003e6504
+
+Changes since version 0.10.7:
+
+* windows: make uv_spawn not fail under job control (Bert Belder)
+
+* darwin: assume CFRunLoopStop() isn't thread-safe (Fedor Indutny)
+
+* win: fix UV_EALREADY incorrectly set (Bert Belder)
+
+* darwin: make two uv__cf_*() functions static (Ben Noordhuis)
+
+* darwin: task_info() cannot fail (Ben Noordhuis)
+
+* unix: add mapping for ENETDOWN (Ben Noordhuis)
+
+* unix: implicitly signal write errors to libuv user (Ben Noordhuis)
+
+* unix: fix assert on signal pipe overflow (Bert Belder)
+
+* unix: turn off POLLOUT after stream connect (Ben Noordhuis)
+
+
 2013.05.16, Version 0.11.3 (Unstable), 0a48c05b5988aea84c605751900926fa25443b34
 
 Changes since version 0.11.2:

include/uv.h (21 lines changed)
@@ -283,7 +283,28 @@ UV_EXTERN void uv_ref(uv_handle_t*);
 UV_EXTERN void uv_unref(uv_handle_t*);
 UV_EXTERN int uv_has_ref(const uv_handle_t*);
 
+/*
+ * Update the event loop's concept of "now". Libuv caches the current time
+ * at the start of the event loop tick in order to reduce the number of
+ * time-related system calls.
+ *
+ * You won't normally need to call this function unless you have callbacks
+ * that block the event loop for longer periods of time, where "longer" is
+ * somewhat subjective but probably on the order of a millisecond or more.
+ */
 UV_EXTERN void uv_update_time(uv_loop_t*);
+
+/*
+ * Return the current timestamp in milliseconds. The timestamp is cached at
+ * the start of the event loop tick, see |uv_update_time()| for details and
+ * rationale.
+ *
+ * The timestamp increases monotonically from some arbitrary point in time.
+ * Don't make assumptions about the starting point, you will only get
+ * disappointed.
+ *
+ * Use uv_hrtime() if you need sub-millisecond granularity.
+ */
 UV_EXTERN uint64_t uv_now(uv_loop_t*);
 
 /*
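
The header comments above describe libuv's cached clock: uv_now() returns the timestamp captured at the start of the current loop iteration, and uv_update_time() refreshes that cache. A rough caller-side sketch against the 0.10/0.11-era API (the timer, delay and printf output are illustrative only, not part of this patch):

#include <stdio.h>
#include <unistd.h>
#include <uv.h>

/* Deliberately blocks the loop, then refreshes the cached timestamp. */
static void slow_cb(uv_timer_t* handle, int status) {
  uv_loop_t* loop = handle->loop;
  uint64_t before = uv_now(loop);            /* value cached at tick start */
  (void) status;

  usleep(50 * 1000);                         /* pretend to do heavy work */
  printf("still cached: +%llu ms\n",
         (unsigned long long) (uv_now(loop) - before));  /* typically 0 */

  uv_update_time(loop);                      /* refresh the cache by hand */
  printf("after update: +%llu ms\n",
         (unsigned long long) (uv_now(loop) - before));  /* roughly 50 */

  uv_timer_stop(handle);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, slow_cb, 10, 0);    /* fire once after 10 ms */
  return uv_run(loop, UV_RUN_DEFAULT);
}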

@@ -162,7 +162,12 @@ void uv__make_close_pending(uv_handle_t* handle) {
 
 
 static void uv__finish_close(uv_handle_t* handle) {
-  assert(!uv__is_active(handle));
+  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
+   * for it to be active in the sense that uv__is_active() returns true.
+   * A good example is when the user calls uv_shutdown(), immediately followed
+   * by uv_close(). The handle is considered active at this point because the
+   * completion of the shutdown req is still pending.
+   */
   assert(handle->flags & UV_CLOSING);
   assert(!(handle->flags & UV_CLOSED));
   handle->flags |= UV_CLOSED;

@@ -220,7 +225,7 @@ static void uv__run_closing_handles(uv_loop_t* loop) {
 
 
 int uv_is_closing(const uv_handle_t* handle) {
-  return handle->flags & (UV_CLOSING | UV_CLOSED);
+  return uv__is_closing(handle);
 }
 
 

@@ -335,11 +340,6 @@ void uv_update_time(uv_loop_t* loop) {
 }
 
 
-uint64_t uv_now(uv_loop_t* loop) {
-  return loop->time;
-}
-
-
 int uv_is_active(const uv_handle_t* handle) {
   return uv__is_active(handle);
 }

@@ -38,8 +38,8 @@
 #include <unistd.h> /* sysconf */
 
 /* Forward declarations */
-void uv__cf_loop_runner(void* arg);
-void uv__cf_loop_cb(void* arg);
+static void uv__cf_loop_runner(void* arg);
+static void uv__cf_loop_cb(void* arg);
 
 typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
 struct uv__cf_loop_signal_s {

@@ -102,7 +102,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
 }
 
 
-void uv__cf_loop_runner(void* arg) {
+static void uv__cf_loop_runner(void* arg) {
   uv_loop_t* loop;
 
   loop = arg;

@@ -124,7 +124,7 @@ void uv__cf_loop_runner(void* arg) {
 }
 
 
-void uv__cf_loop_cb(void* arg) {
+static void uv__cf_loop_cb(void* arg) {
   uv_loop_t* loop;
   QUEUE* item;
   QUEUE split_head;

@@ -257,19 +257,21 @@ void uv_loadavg(double avg[3]) {
 
 
 uv_err_t uv_resident_set_memory(size_t* rss) {
-  struct task_basic_info t_info;
-  mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
+  mach_msg_type_number_t count;
+  task_basic_info_data_t info;
+  kern_return_t err;
 
-  int r = task_info(mach_task_self(),
-                    TASK_BASIC_INFO,
-                    (task_info_t)&t_info,
-                    &t_info_count);
-
-  if (r != KERN_SUCCESS) {
-    return uv__new_sys_error(errno);
-  }
-
-  *rss = t_info.resident_size;
+  count = TASK_BASIC_INFO_COUNT;
+  err = task_info(mach_task_self(),
+                  TASK_BASIC_INFO,
+                  (task_info_t) &info,
+                  &count);
+  (void) &err;
+  /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
+   * KERN_SUCCESS implies a libuv bug.
+   */
+  assert(err == KERN_SUCCESS);
+  *rss = info.resident_size;
 
   return uv_ok_;
 }

@@ -79,6 +79,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
     case EMSGSIZE: return UV_EMSGSIZE;
     case ENAMETOOLONG: return UV_ENAMETOOLONG;
     case EINVAL: return UV_EINVAL;
+    case ENETDOWN: return UV_ENETDOWN;
     case ENETUNREACH: return UV_ENETUNREACH;
     case ECONNABORTED: return UV_ECONNABORTED;
     case ELOOP: return UV_ELOOP;

@@ -436,11 +436,14 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
      * non-blocking mode and not all data could be written. If a non-zero
      * number of bytes have been sent, we don't consider it an error.
      */
-    len = 0;
 
 #if defined(__FreeBSD__)
+    len = 0;
     r = sendfile(in_fd, out_fd, req->off, req->len, NULL, &len, 0);
 #else
+    /* The darwin sendfile takes len as an input for the length to send,
+     * so make sure to initialize it with the caller's value. */
+    len = req->len;
     r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
 #endif
 

@@ -382,12 +382,12 @@ uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
     return uv__new_sys_error(ENOMEM);
 
   if (read_models(numcpus, ci)) {
-    SAVE_ERRNO(free(ci));
+    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
     return uv__new_sys_error(errno);
   }
 
   if (read_times(numcpus, ci)) {
-    SAVE_ERRNO(free(ci));
+    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
     return uv__new_sys_error(errno);
   }
 
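
Switching the cleanup from free(ci) to uv_free_cpu_info(ci, numcpus) matters because read_models() may already have strndup'ed a model string into each entry. For context, a minimal consumer of this API in the 0.10-era uv_err_t style (output format is illustrative only):

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count).code != UV_OK)
    return 1;

  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);

  /* Releases the duplicated model strings as well as the array itself. */
  uv_free_cpu_info(cpus, count);
  return 0;
}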

@@ -414,76 +414,89 @@ static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
 
 /* Also reads the CPU frequency on x86. The other architectures only have
  * a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
  */
 static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
-#if defined(__i386__) || defined(__x86_64__)
   static const char model_marker[] = "model name\t: ";
   static const char speed_marker[] = "cpu MHz\t\t: ";
-#elif defined(__arm__)
-  static const char model_marker[] = "Processor\t: ";
-  static const char speed_marker[] = "";
-#elif defined(__mips__)
-  static const char model_marker[] = "cpu model\t\t: ";
-  static const char speed_marker[] = "";
-#else
-# warning uv_cpu_info() is not supported on this architecture.
-  static const char model_marker[] = "";
-  static const char speed_marker[] = "";
-#endif
-  static const char bogus_model[] = "unknown";
+  const char* inferred_model;
   unsigned int model_idx;
   unsigned int speed_idx;
   char buf[1024];
   char* model;
   FILE* fp;
-  char* inferred_model;
 
-  fp = fopen("/proc/cpuinfo", "r");
-  if (fp == NULL)
-    return -1;
+  /* Most are unused on non-ARM and non-x86 architectures. */
+  (void) &model_marker;
+  (void) &speed_marker;
+  (void) &speed_idx;
+  (void) &model;
+  (void) &buf;
+  (void) &fp;
 
   model_idx = 0;
   speed_idx = 0;
 
+#if defined(__arm__) || defined(__i386__) || defined(__x86_64__)
+  fp = fopen("/proc/cpuinfo", "r");
+  if (fp == NULL)
+    return -1;
+
   while (fgets(buf, sizeof(buf), fp)) {
-    if (model_marker[0] != '\0' &&
-        model_idx < numcpus &&
-        strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0)
-    {
-      model = buf + sizeof(model_marker) - 1;
-      model = strndup(model, strlen(model) - 1); /* strip newline */
-      ci[model_idx++].model = model;
-      continue;
+    if (model_idx < numcpus) {
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1); /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -1;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
     }
-
-    if (speed_marker[0] != '\0' &&
-        speed_idx < numcpus &&
-        strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0)
-    {
-      ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
-      continue;
+#if defined(__arm__)
+    /* Fallback for pre-3.8 kernels. */
+    if (model_idx < numcpus) {
+      static const char model_marker[] = "Processor\t: ";
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1); /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -1;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
     }
+#else /* !__arm__ */
+    if (speed_idx < numcpus) {
+      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+        continue;
+      }
+    }
+#endif /* __arm__ */
   }
+
   fclose(fp);
+#endif /* __arm__ || __i386__ || __x86_64__ */
 
-  /* Now we want to make sure that all the models contain *something*:
-   * it's not safe to leave them as null.
+  /* Now we want to make sure that all the models contain *something* because
+   * it's not safe to leave them as null. Copy the last entry unless there
+   * isn't one, in that case we simply put "unknown" into everything.
    */
-  if (model_idx == 0) {
-    /* No models at all: fake up the first one. */
-    ci[0].model = strndup(bogus_model, sizeof(bogus_model) - 1);
-    model_idx = 1;
-  }
-
-  /* Not enough models, but we do have at least one. So we'll just
-   * copy the rest down: it might be better to indicate somehow that
-   * the remaining ones have been guessed.
-   */
-  inferred_model = ci[model_idx - 1].model;
+  inferred_model = "unknown";
+  if (model_idx > 0)
+    inferred_model = ci[model_idx - 1].model;
 
   while (model_idx < numcpus) {
-    ci[model_idx].model = strndup(inferred_model, strlen(inferred_model));
-    model_idx++;
+    model = strndup(inferred_model, strlen(inferred_model));
+    if (model == NULL)
+      return -1;
+    ci[model_idx++].model = model;
   }
 
   return 0;

@@ -160,7 +160,7 @@ static void uv__signal_handler(int signum) {
   } while (r == -1 && errno == EINTR);
 
   assert(r == sizeof msg ||
-         (r == -1 && errno != EAGAIN && errno != EWOULDBLOCK));
+         (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
 
   if (r != -1)
     handle->caught_signals++;
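
The corrected assert accepts the overflow case: when the self-pipe that forwards signals to the loop is full, write() fails with EAGAIN/EWOULDBLOCK and the event is dropped, which is harmless because the reader will wake up anyway. A stand-alone sketch of that general pattern in plain POSIX (names and the SIGUSR1 choice are illustrative, not libuv internals):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int self_pipe[2];   /* [0] read end, [1] write end */

static void on_signal(int signum) {
  char byte = (char) signum;
  ssize_t r;

  do
    r = write(self_pipe[1], &byte, 1);
  while (r == -1 && errno == EINTR);

  /* A full pipe is fine; anything else here would be a real bug. */
  if (r == -1 && errno != EAGAIN && errno != EWOULDBLOCK)
    _exit(1);
}

int main(void) {
  char buf[32];

  if (pipe(self_pipe))
    return 1;
  fcntl(self_pipe[0], F_SETFL, fcntl(self_pipe[0], F_GETFL) | O_NONBLOCK);
  fcntl(self_pipe[1], F_SETFL, fcntl(self_pipe[1], F_GETFL) | O_NONBLOCK);

  if (signal(SIGUSR1, on_signal) == SIG_ERR)
    return 1;

  raise(SIGUSR1);

  /* Drain whatever made it into the pipe; overflowed signals are lost. */
  return read(self_pipe[0], buf, sizeof(buf)) > 0 ? 0 : 1;
}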

@@ -60,6 +60,7 @@ static void uv__stream_connect(uv_stream_t*);
 static void uv__write(uv_stream_t* stream);
 static void uv__read(uv_stream_t* stream);
 static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static size_t uv__write_req_size(uv_write_t* req);
 
 
 /* Used by the accept() EMFILE party trick. */

@@ -399,6 +400,7 @@ void uv__stream_destroy(uv_stream_t* stream) {
 
     if (req->bufs != req->bufsml)
       free(req->bufs);
+    req->bufs = NULL;
 
     if (req->cb) {
       uv__set_artificial_error(req->handle->loop, UV_ECANCELED);

@@ -413,6 +415,13 @@ void uv__stream_destroy(uv_stream_t* stream) {
     req = QUEUE_DATA(q, uv_write_t, queue);
     uv__req_unregister(stream->loop, req);
 
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        free(req->bufs);
+      req->bufs = NULL;
+    }
+
     if (req->cb) {
       uv__set_sys_error(stream->loop, req->error);
       req->cb(req, req->error ? -1 : 0);

@@ -420,6 +429,11 @@ void uv__stream_destroy(uv_stream_t* stream) {
   }
 
   if (stream->shutdown_req) {
+    /* The UV_ECANCELED error code is a lie, the shutdown(2) syscall is a
+     * fait accompli at this point. Maybe we should revisit this in v0.11.
+     * A possible reason for leaving it unchanged is that it informs the
+     * callee that the handle has been destroyed.
+     */
     uv__req_unregister(stream->loop, stream->shutdown_req);
     uv__set_artificial_error(stream->loop, UV_ECANCELED);
     stream->shutdown_req->cb(stream->shutdown_req, -1);

@@ -601,8 +615,6 @@ static void uv__drain(uv_stream_t* stream) {
   uv_shutdown_t* req;
 
   assert(QUEUE_EMPTY(&stream->write_queue));
-  assert(stream->write_queue_size == 0);
-
   uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
 
   /* Shutdown? */

@@ -635,6 +647,7 @@ static void uv__drain(uv_stream_t* stream) {
 static size_t uv__write_req_size(uv_write_t* req) {
   size_t size;
 
+  assert(req->bufs != NULL);
   size = uv__buf_count(req->bufs + req->write_index,
                        req->bufcnt - req->write_index);
   assert(req->handle->write_queue_size >= size);

@@ -648,10 +661,18 @@ static void uv__write_req_finish(uv_write_t* req) {
 
   /* Pop the req off tcp->write_queue. */
   QUEUE_REMOVE(&req->queue);
-  if (req->bufs != req->bufsml) {
-    free(req->bufs);
+
+  /* Only free when there was no error. On error, we touch up write_queue_size
+   * right before making the callback. The reason we don't do that right away
+   * is that a write_queue_size > 0 is our only way to signal to the user that
+   * he should stop writing - which he should if we got an error. Something to
+   * revisit in future revisions of the libuv API.
+   */
+  if (req->error == 0) {
+    if (req->bufs != req->bufsml)
+      free(req->bufs);
+    req->bufs = NULL;
   }
-  req->bufs = NULL;
 
   /* Add it to the write_completed_queue where it will have its
    * callback called in the near future.
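
The new comment in uv__write_req_finish() captures the idea behind several hunks in this merge: after a write error, write_queue_size deliberately stays non-zero until the callback runs, and that counter is the application's back-pressure signal. A hedged caller-side sketch against the 0.10-era uv_write() signature (HIGH_WATER and try_send() are made-up names):

#include <stdlib.h>
#include <uv.h>

#define HIGH_WATER (64 * 1024)   /* arbitrary illustrative threshold */

static void write_cb(uv_write_t* req, int status) {
  (void) status;   /* non-zero means the stream errored */
  free(req);
}

/* Queue a chunk unless the stream already has too much buffered. */
static int try_send(uv_stream_t* handle, char* data, unsigned int len) {
  uv_write_t* req;
  uv_buf_t buf;

  if (handle->write_queue_size > HIGH_WATER)
    return -1;   /* back off; earlier writes (or an error) are still pending */

  req = malloc(sizeof(*req));
  if (req == NULL)
    return -1;

  buf = uv_buf_init(data, len);
  if (uv_write(req, handle, &buf, 1, write_cb)) {
    free(req);
    return -1;
  }
  return 0;
}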

@@ -687,10 +708,8 @@ start:
 
   assert(uv__stream_fd(stream) >= 0);
 
-  if (QUEUE_EMPTY(&stream->write_queue)) {
-    assert(stream->write_queue_size == 0);
+  if (QUEUE_EMPTY(&stream->write_queue))
     return;
-  }
 
   q = QUEUE_HEAD(&stream->write_queue);
   req = QUEUE_DATA(q, uv_write_t, queue);

@@ -761,8 +780,10 @@ start:
     if (errno != EAGAIN && errno != EWOULDBLOCK) {
       /* Error */
       req->error = errno;
-      stream->write_queue_size -= uv__write_req_size(req);
       uv__write_req_finish(req);
+      uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+      if (!uv__io_active(&stream->io_watcher, UV__POLLIN))
+        uv__handle_stop(stream);
       return;
     } else if (stream->flags & UV_STREAM_BLOCKING) {
       /* If this is a blocking stream, try again. */

@@ -838,6 +859,13 @@ static void uv__write_callbacks(uv_stream_t* stream) {
     QUEUE_REMOVE(q);
     uv__req_unregister(stream->loop, req);
 
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        free(req->bufs);
+      req->bufs = NULL;
+    }
+
     /* NOTE: call callback AFTER freeing the request data. */
     if (req->cb) {
       uv__set_sys_error(stream->loop, req->error);

@@ -1119,6 +1147,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
 
   stream->connect_req = NULL;
   uv__req_unregister(stream->loop, req);
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
 
   if (req->cb) {
     uv__set_sys_error(stream->loop, error);

@@ -1158,6 +1187,12 @@ int uv_write2(uv_write_t* req,
     return uv__set_artificial_error(stream->loop, UV_EBADF);
   }
 
+  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+   * it means there are error-state requests in the write_completed_queue that
+   * will touch up write_queue_size later, see also uv__write_req_finish().
+   * We could check that write_queue is empty instead but that implies making
+   * a write() syscall when we know that the handle is in error mode.
+   */
   empty_queue = (stream->write_queue_size == 0);
 
   /* Initialize the req */

@@ -1266,9 +1301,20 @@ int uv_read2_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
 
 
 int uv_read_stop(uv_stream_t* stream) {
-  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
-  uv__handle_stop(stream);
+  /* Sanity check. We're going to stop the handle unless it's primed for
+   * writing but that means there should be some kind of write action in
+   * progress.
+   */
+  assert(!uv__io_active(&stream->io_watcher, UV__POLLOUT) ||
+         !QUEUE_EMPTY(&stream->write_completed_queue) ||
+         !QUEUE_EMPTY(&stream->write_queue) ||
+         stream->shutdown_req != NULL ||
+         stream->connect_req != NULL);
+
   stream->flags &= ~UV_STREAM_READING;
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
+  if (!uv__io_active(&stream->io_watcher, UV__POLLOUT))
+    uv__handle_stop(stream);
 
 #if defined(__APPLE__)
   /* Notify select() thread about state change */

@@ -434,3 +434,8 @@ int uv_has_ref(const uv_handle_t* handle) {
 void uv_stop(uv_loop_t* loop) {
   loop->stop_flag = 1;
 }
+
+
+uint64_t uv_now(uv_loop_t* loop) {
+  return loop->time;
+}

@@ -150,6 +150,9 @@ void uv__fs_poll_close(uv_fs_poll_t* handle);
 #define uv__is_active(h) \
   (((h)->flags & UV__HANDLE_ACTIVE) != 0)
 
+#define uv__is_closing(h) \
+  (((h)->flags & (UV_CLOSING | UV_CLOSED)) != 0)
+
 #define uv__handle_start(h) \
   do { \
     assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
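
The uv__is_closing() macro added here is what the public uv_is_closing() now returns (see the earlier hunk). A typical application-side use is guarding against closing a handle twice when several error paths can tear it down; a small sketch with illustrative helper names:

#include <uv.h>

static void close_cb(uv_handle_t* handle) {
  /* handle memory may be reclaimed here */
  (void) handle;
}

/* Tear down a TCP handle at most once, even if several error paths race to
 * do it. uv_is_closing() is true from uv_close() until close_cb has run. */
static void maybe_close(uv_tcp_t* tcp) {
  if (!uv_is_closing((uv_handle_t*) tcp))
    uv_close((uv_handle_t*) tcp, close_cb);
}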

@@ -64,11 +64,6 @@ void uv__time_forward(uv_loop_t* loop, uint64_t msecs) {
 }
 
 
-uint64_t uv_now(uv_loop_t* loop) {
-  return loop->time;
-}
-
-
 static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
   if (a->due < b->due)
     return -1;

@@ -62,7 +62,7 @@ TEST_IMPL(osx_select) {
 
   uv_read_start((uv_stream_t*) &tty, alloc_cb, read_cb);
 
-  // Emulate user-input
+  /* Emulate user-input */
   str = "got some input\n"
         "with a couple of lines\n"
         "feel pretty happy\n";