win: simplify global loops queue

The change in https://github.com/libuv/libuv/pull/962 had to be done the
complicated way to keep ABI stability in v1.x, but we can fix that now.

PR-URL: https://github.com/libuv/libuv/pull/1001
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Saúl Ibarra Corretgé 2016-08-18 01:48:38 +02:00
parent 805391bfaa
commit 1f21b19e08
2 changed files with 14 additions and 74 deletions

--- a/include/uv-win.h
+++ b/include/uv-win.h
@@ -210,7 +210,7 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
   uv_handle_t* endgame_handles; \
   /* The head of the timers tree */ \
   struct uv_timer_tree_s timers; \
-  /* Lists of active loop (prepare / check / idle) watchers */ \
+  /* Lists of active loop (prepare / check / idle) watchers */ \
   void* prepare_handles[2]; \
   void* check_handles[2]; \
   void* idle_handles[2]; \
@@ -224,7 +224,9 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
   uv_async_t wq_async; \
   /* Async handle */ \
   struct uv_req_s async_req; \
-  void* async_handles[2];
+  void* async_handles[2]; \
+  /* Global queue of loops */ \
+  void* loops_queue[2];

 #define UV_REQ_TYPE_PRIVATE \
   /* TODO: remove the req suffix */ \

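For context, the new loops_queue[2] member is an intrusive queue node embedded
directly in uv_loop_t, so putting a loop on the global list needs no separate
allocation; the enclosing loop is later recovered from the node with QUEUE_DATA
(see src/win/core.c below). A minimal, self-contained sketch of that recovery
step; demo_loop and CONTAINER_OF are made-up illustrative names, not libuv API:

/* Illustration only: demo_loop and CONTAINER_OF are invented names, not libuv API. */
#include <stddef.h>
#include <stdio.h>

struct demo_loop {
  int id;
  void* loops_queue[2];   /* embedded node, same shape as the field added above */
};

/* Recover the enclosing struct from a pointer to its embedded node,
 * which is what QUEUE_DATA(q, uv_loop_t, loops_queue) boils down to. */
#define CONTAINER_OF(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

int main(void) {
  struct demo_loop loop = { 42, { NULL, NULL } };
  void** node = loop.loops_queue;   /* what a queue traversal hands back */
  struct demo_loop* back = CONTAINER_OF(node, struct demo_loop, loops_queue);
  printf("%d\n", back->id);         /* prints 42 */
  return 0;
}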
--- a/src/win/core.c
+++ b/src/win/core.c
@@ -79,93 +79,32 @@ static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
 }
 #endif

-static uv_loop_t** uv__loops;
-static int uv__loops_size;
-static int uv__loops_capacity;
-#define UV__LOOPS_CHUNK_SIZE 8
+static void* uv__loops[2];
 static uv_mutex_t uv__loops_lock;

 static void uv__loops_init() {
   uv_mutex_init(&uv__loops_lock);
-  uv__loops = uv__calloc(UV__LOOPS_CHUNK_SIZE, sizeof(uv_loop_t*));
-  if (!uv__loops)
-    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
-  uv__loops_size = 0;
-  uv__loops_capacity = UV__LOOPS_CHUNK_SIZE;
+  QUEUE_INIT(&uv__loops);
 }

-static int uv__loops_add(uv_loop_t* loop) {
-  uv_loop_t** new_loops;
-  int new_capacity, i;
-
+static void uv__loops_add(uv_loop_t* loop) {
   uv_mutex_lock(&uv__loops_lock);
-
-  if (uv__loops_size == uv__loops_capacity) {
-    new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE;
-    new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity);
-    if (!new_loops)
-      goto failed_loops_realloc;
-    uv__loops = new_loops;
-    for (i = uv__loops_capacity; i < new_capacity; ++i)
-      uv__loops[i] = NULL;
-    uv__loops_capacity = new_capacity;
-  }
-  uv__loops[uv__loops_size] = loop;
-  ++uv__loops_size;
-
+  QUEUE_INSERT_TAIL(&uv__loops, &loop->loops_queue);
   uv_mutex_unlock(&uv__loops_lock);
-  return 0;
-
-failed_loops_realloc:
-  uv_mutex_unlock(&uv__loops_lock);
-  return ERROR_OUTOFMEMORY;
 }

 static void uv__loops_remove(uv_loop_t* loop) {
-  int loop_index;
-  int smaller_capacity;
-  uv_loop_t** new_loops;
-
   uv_mutex_lock(&uv__loops_lock);
-
-  for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) {
-    if (uv__loops[loop_index] == loop)
-      break;
-  }
-  /* If loop was not found, ignore */
-  if (loop_index == uv__loops_size)
-    goto loop_removed;
-
-  uv__loops[loop_index] = uv__loops[uv__loops_size - 1];
-  uv__loops[uv__loops_size - 1] = NULL;
-  --uv__loops_size;
-
-  /* If we didn't grow to big skip downsizing */
-  if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE)
-    goto loop_removed;
-
-  /* Downsize only if more than half of buffer is free */
-  smaller_capacity = uv__loops_capacity / 2;
-  if (uv__loops_size >= smaller_capacity)
-    goto loop_removed;
-  new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity);
-  if (!new_loops)
-    goto loop_removed;
-  uv__loops = new_loops;
-  uv__loops_capacity = smaller_capacity;
-
-loop_removed:
+  QUEUE_REMOVE(&loop->loops_queue);
   uv_mutex_unlock(&uv__loops_lock);
 }

 void uv__wake_all_loops() {
-  int i;
-  uv_loop_t* loop;
+  QUEUE* q;

   uv_mutex_lock(&uv__loops_lock);
-  for (i = 0; i < uv__loops_size; ++i) {
-    loop = uv__loops[i];
-    assert(loop);
+  QUEUE_FOREACH(q, &uv__loops) {
+    uv_loop_t* loop = QUEUE_DATA(q, uv_loop_t, loops_queue);
     if (loop->iocp != INVALID_HANDLE_VALUE)
       PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL);
   }
@@ -272,9 +211,8 @@ int uv_loop_init(uv_loop_t* loop) {
   uv__handle_unref(&loop->wq_async);
   loop->wq_async.flags |= UV__HANDLE_INTERNAL;

-  err = uv__loops_add(loop);
-  if (err)
-    goto fail_async_init;
+  QUEUE_INIT(&loop->loops_queue);
+  uv__loops_add(loop);

   return 0;
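
For readers unfamiliar with libuv's QUEUE macros: they implement an intrusive
circular doubly-linked list, which is why uv__loops_add() and uv__loops_remove()
shrink to constant-time pointer updates and uv__wake_all_loops() can walk every
loop with no array bookkeeping. The following self-contained sketch shows the
pattern under simplifying assumptions; qnode, demo_loop and the q_* helpers are
invented names, and a plain struct stands in for libuv's void*[2] node.

/* Simplified sketch of the intrusive-queue pattern used above.
 * All names here (qnode, demo_loop, q_*) are illustrative, not libuv API. */
#include <stddef.h>
#include <stdio.h>

struct qnode { struct qnode* next; struct qnode* prev; };

struct demo_loop {
  int id;
  struct qnode loops_queue;          /* node embedded in the loop itself */
};

static void q_init(struct qnode* h)  { h->next = h; h->prev = h; }

static void q_insert_tail(struct qnode* h, struct qnode* n) {
  n->next = h;                       /* O(1), no reallocation or search */
  n->prev = h->prev;
  h->prev->next = n;
  h->prev = n;
}

static void q_remove(struct qnode* n) {
  n->prev->next = n->next;           /* O(1) removal, unlike the old array scan */
  n->next->prev = n->prev;
}

/* QUEUE_DATA equivalent: recover the enclosing struct from the embedded node. */
#define Q_DATA(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

static struct qnode demo_loops;      /* global list head, like uv__loops above */

int main(void) {
  struct demo_loop a = { 1, { NULL, NULL } };
  struct demo_loop b = { 2, { NULL, NULL } };
  struct qnode* q;

  q_init(&demo_loops);
  q_insert_tail(&demo_loops, &a.loops_queue);   /* what uv__loops_add does */
  q_insert_tail(&demo_loops, &b.loops_queue);

  /* QUEUE_FOREACH equivalent: the walk uv__wake_all_loops does before
   * posting to each loop's IOCP. */
  for (q = demo_loops.next; q != &demo_loops; q = q->next) {
    struct demo_loop* loop = Q_DATA(q, struct demo_loop, loops_queue);
    printf("waking loop %d\n", loop->id);
  }

  q_remove(&a.loops_queue);                     /* what uv__loops_remove does */
  return 0;
}

Insertion and removal only touch neighbouring pointers, which is what lets the
commit drop the grow/shrink logic around uv__loops_capacity entirely.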