test: make threadpool saturation reliable

Use a deterministic approach to saturate the threadpool instead of
depending on a timeout. This should help reduce CI flakiness.

PR-URL: https://github.com/libuv/libuv/pull/1014
Reviewed-By: Saúl Ibarra Corretgé <saghul@gmail.com>
commit 538782fbda
parent 39baaa58b7
Author: Ben Noordhuis
Date:   2016-08-23 13:15:11 +02:00
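
For orientation, here is a condensed, standalone sketch of the saturation
pattern this diff introduces. It is illustrative, not part of the commit:
the main() driver and the NREQS constant are invented for the example,
while uv_sem_*, uv_queue_work() and the UV_THREADPOOL_SIZE environment
variable are real libuv API.

#include <stdlib.h>
#include <uv.h>

#define NREQS 4  /* illustrative; must match UV_THREADPOOL_SIZE below */

static uv_work_t pause_reqs[NREQS];
static uv_sem_t pause_sems[NREQS];

/* Each worker thread parks here until the test posts its semaphore. */
static void work_cb(uv_work_t* req) {
  uv_sem_wait(pause_sems + (req - pause_reqs));
}

static void done_cb(uv_work_t* req, int status) {
  uv_sem_destroy(pause_sems + (req - pause_reqs));
}

int main(void) {
  int i;

  /* Pin the pool size before the first uv_queue_work() call creates it. */
  putenv("UV_THREADPOOL_SIZE=4");

  /* One blocking request per worker thread; once each is picked up the
   * pool is fully saturated, with no timeouts involved. */
  for (i = 0; i < NREQS; i++) {
    uv_sem_init(pause_sems + i, 0);
    uv_queue_work(uv_default_loop(), pause_reqs + i, work_cb, done_cb);
  }

  /* ... queue and uv_cancel() the requests under test here ... */

  for (i = 0; i < NREQS; i++)
    uv_sem_post(pause_sems + i);  /* release the workers */

  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}

Parking each worker on its own semaphore makes saturation a fact
established by construction rather than inferred from a 350 ms timeout,
so a slow CI machine can no longer be mistaken for a saturated pool.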


@@ -37,82 +37,45 @@ struct cancel_info {
   uv_timer_t timer_handle;
 };
-struct suspend_req {
-  uv_work_t req;
-  uv_sem_t sem;
-};
-static uv_cond_t signal_cond;
-static uv_mutex_t signal_mutex;
-static uv_mutex_t wait_mutex;
-static unsigned num_threads;
 static unsigned fs_cb_called;
-static unsigned work_cb_called;
 static unsigned done_cb_called;
 static unsigned done2_cb_called;
 static unsigned timer_cb_called;
+static uv_work_t pause_reqs[4];
+static uv_sem_t pause_sems[ARRAY_SIZE(pause_reqs)];
 static void work_cb(uv_work_t* req) {
-  uv_mutex_lock(&signal_mutex);
-  uv_cond_signal(&signal_cond);
-  uv_mutex_unlock(&signal_mutex);
-  uv_mutex_lock(&wait_mutex);
-  uv_mutex_unlock(&wait_mutex);
-  work_cb_called++;
+  uv_sem_wait(pause_sems + (req - pause_reqs));
 }
 static void done_cb(uv_work_t* req, int status) {
   done_cb_called++;
-  free(req);
+  uv_sem_destroy(pause_sems + (req - pause_reqs));
 }
 static void saturate_threadpool(void) {
-  uv_work_t* req;
-  ASSERT(0 == uv_cond_init(&signal_cond));
-  ASSERT(0 == uv_mutex_init(&signal_mutex));
-  ASSERT(0 == uv_mutex_init(&wait_mutex));
-  uv_mutex_lock(&signal_mutex);
-  uv_mutex_lock(&wait_mutex);
-  for (num_threads = 0; /* empty */; num_threads++) {
-    req = malloc(sizeof(*req));
-    ASSERT(req != NULL);
-    ASSERT(0 == uv_queue_work(uv_default_loop(), req, work_cb, done_cb));
-    /* Expect to get signalled within 350 ms, otherwise assume that
-     * the thread pool is saturated. As with any timing dependent test,
-     * this is obviously not ideal.
-     */
-    if (uv_cond_timedwait(&signal_cond,
-                          &signal_mutex,
-                          (uint64_t) (350 * 1e6))) {
-      ASSERT(0 == uv_cancel((uv_req_t*) req));
-      break;
-    }
-  }
+  uv_loop_t* loop;
+  char buf[64];
+  size_t i;
+  snprintf(buf, sizeof(buf), "UV_THREADPOOL_SIZE=%zu", ARRAY_SIZE(pause_reqs));
+  putenv(buf);
+  loop = uv_default_loop();
+  for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1) {
+    ASSERT(0 == uv_sem_init(pause_sems + i, 0));
+    ASSERT(0 == uv_queue_work(loop, pause_reqs + i, work_cb, done_cb));
+  }
 }
 static void unblock_threadpool(void) {
-  uv_mutex_unlock(&signal_mutex);
-  uv_mutex_unlock(&wait_mutex);
-}
-static void cleanup_threadpool(void) {
-  ASSERT(done_cb_called == num_threads + 1); /* +1 == cancelled work req. */
-  ASSERT(work_cb_called == num_threads);
-  uv_cond_destroy(&signal_cond);
-  uv_mutex_destroy(&signal_mutex);
-  uv_mutex_destroy(&wait_mutex);
+  size_t i;
+  for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1)
+    uv_sem_post(pause_sems + i);
 }
@@ -171,14 +134,6 @@ static void timer_cb(uv_timer_t* handle) {
 }
-static void suspend(uv_work_t* req) {
-  struct suspend_req *s;
-  s = container_of(req, struct suspend_req, req);
-  uv_sem_wait(&s->sem);
-}
 static void nop_done_cb(uv_work_t* req, int status) {
   ASSERT(status == UV_ECANCELED);
   done_cb_called++;
@@ -213,8 +168,6 @@ TEST_IMPL(threadpool_cancel_getaddrinfo) {
   ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
   ASSERT(1 == timer_cb_called);
-  cleanup_threadpool();
   MAKE_VALGRIND_HAPPY();
   return 0;
 }
@@ -251,8 +204,6 @@ TEST_IMPL(threadpool_cancel_getnameinfo) {
   ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
   ASSERT(1 == timer_cb_called);
-  cleanup_threadpool();
   MAKE_VALGRIND_HAPPY();
   return 0;
 }
@@ -277,8 +228,6 @@ TEST_IMPL(threadpool_cancel_work) {
   ASSERT(1 == timer_cb_called);
   ASSERT(ARRAY_SIZE(reqs) == done2_cb_called);
-  cleanup_threadpool();
   MAKE_VALGRIND_HAPPY();
   return 0;
 }
@@ -332,7 +281,6 @@ TEST_IMPL(threadpool_cancel_fs) {
   ASSERT(n == fs_cb_called);
   ASSERT(1 == timer_cb_called);
-  cleanup_threadpool();
   MAKE_VALGRIND_HAPPY();
   return 0;
@@ -340,23 +288,17 @@ TEST_IMPL(threadpool_cancel_fs) {
 TEST_IMPL(threadpool_cancel_single) {
-  struct suspend_req s;
   uv_loop_t* loop;
   uv_work_t req;
-  putenv("UV_THREADPOOL_SIZE=1");
-  ASSERT(0 == uv_sem_init(&s.sem, 0));
+  saturate_threadpool();
   loop = uv_default_loop();
-  ASSERT(0 == uv_queue_work(loop, &s.req, suspend, NULL));
   ASSERT(0 == uv_queue_work(loop, &req, (uv_work_cb) abort, nop_done_cb));
   ASSERT(0 == uv_cancel((uv_req_t*) &req));
-  uv_sem_post(&s.sem);
   ASSERT(0 == done_cb_called);
+  unblock_threadpool();
   ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
   ASSERT(1 == done_cb_called);
-  uv_sem_destroy(&s.sem);
   MAKE_VALGRIND_HAPPY();
   return 0;
 }