unix: fix uv_fs_event_stop() from fs_event_cb
The following changeset
442b8a5a84 "unix: use QUEUE_MOVE when iterating over lists"
introduced a new assert failure:
`queue_foreach_delete` failed: exit code 6
Output from process `queue_foreach_delete`:
run-tests: src/unix/linux-inotify.c:244: uv_fs_event_stop: Assertion `w != ((void *)0)' failed.
Simplest test case for this (see the sketch below):
1. create and start two uv_fs_event_t for the same path;
2. in the callback for the first one, call uv_close() on it;
3. observe the assert (or a segfault) when uv__inotify_read() accesses the second uv_fs_event_t.
PR-URL: https://github.com/libuv/libuv/pull/621
Reviewed-By: Saúl Ibarra Corretgé <saghul@gmail.com>
commit 46343764d5 (parent 6060841edc)
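
For reference, a minimal stand-alone sketch of those reproduction steps (not part of this commit; the handle names, the helper timer, and the use of uv_fs_utime() to touch the watched directory are illustrative only):

#include <assert.h>
#include <uv.h>

static uv_fs_event_t first;
static uv_fs_event_t second;
static uv_timer_t trigger;

static void first_cb(uv_fs_event_t* handle,
                     const char* filename,
                     int events,
                     int status) {
  /* Step 2: close (and thereby stop) the first watcher from its own
   * callback. Before this fix, that could free the shared watcher_list
   * while uv__inotify_read() was still iterating over it. */
  uv_close((uv_handle_t*) handle, NULL);
}

static void second_cb(uv_fs_event_t* handle,
                      const char* filename,
                      int events,
                      int status) {
  /* Step 3: before the fix, the shared watcher_list was already gone by
   * the time this handle was reached, so stopping/closing it tripped the
   * assert in uv_fs_event_stop(). */
  uv_close((uv_handle_t*) handle, NULL);
}

static void trigger_cb(uv_timer_t* handle) {
  uv_fs_t req;
  /* Touch the watched directory so both fs_event callbacks fire for the
   * same inotify event (same trick as the helper timer in the test). */
  uv_fs_utime(handle->loop, &req, ".", 0, 0, NULL);
  uv_fs_req_cleanup(&req);
  uv_close((uv_handle_t*) handle, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();

  /* Step 1: two watchers for the same path. */
  assert(0 == uv_fs_event_init(loop, &first));
  assert(0 == uv_fs_event_start(&first, first_cb, ".", 0));
  assert(0 == uv_fs_event_init(loop, &second));
  assert(0 == uv_fs_event_start(&second, second_cb, ".", 0));

  assert(0 == uv_timer_init(loop, &trigger));
  assert(0 == uv_timer_start(&trigger, trigger_cb, 0, 0));

  return uv_run(loop, UV_RUN_DEFAULT);
}

Built against a libuv without this fix, this should hit the uv_fs_event_stop() assert quoted above (or crash earlier on the use-after-free); with the fix, all handles close and the loop exits cleanly.
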
src/unix/linux-inotify.c
@@ -35,6 +35,7 @@
 struct watcher_list {
   RB_ENTRY(watcher_list) entry;
   QUEUE watchers;
+  int iterating;
   char* path;
   int wd;
 };
@@ -113,6 +114,15 @@ static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
   return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
 }
 
+static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
+  /* if the watcher_list->watchers is being iterated over, we can't free it. */
+  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
+    /* No watchers left for this path. Clean up. */
+    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
+    uv__inotify_rm_watch(loop->inotify_fd, w->wd);
+    uv__free(w);
+  }
+}
 
 static void uv__inotify_read(uv_loop_t* loop,
                              uv__io_t* dummy,
@@ -160,6 +170,18 @@ static void uv__inotify_read(uv_loop_t* loop,
        */
       path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
 
+      /* We're about to iterate over the queue and call user's callbacks.
+       * What can go wrong?
+       * A callback could call uv_fs_event_stop()
+       * and the queue can change under our feet.
+       * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
+       * And we don't free the watcher_list until we're done iterating.
+       *
+       * First,
+       * tell uv_fs_event_stop() (that could be called from a user's callback)
+       * not to free watcher_list.
+       */
+      w->iterating = 1;
       QUEUE_MOVE(&w->watchers, &queue);
       while (!QUEUE_EMPTY(&queue)) {
         q = QUEUE_HEAD(&queue);
@@ -170,6 +192,9 @@ static void uv__inotify_read(uv_loop_t* loop,
 
         h->cb(h, path, events, 0);
       }
+      /* done iterating, time to (maybe) free empty watcher_list */
+      w->iterating = 0;
+      maybe_free_watcher_list(w, loop);
     }
   }
 }
@@ -221,6 +246,7 @@ int uv_fs_event_start(uv_fs_event_t* handle,
   w->wd = wd;
   w->path = strcpy((char*)(w + 1), path);
   QUEUE_INIT(&w->watchers);
+  w->iterating = 0;
   RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
 
 no_insert:
@@ -248,12 +274,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
   uv__handle_stop(handle);
   QUEUE_REMOVE(&handle->watchers);
 
-  if (QUEUE_EMPTY(&w->watchers)) {
-    /* No watchers left for this path. Clean up. */
-    RB_REMOVE(watcher_root, CAST(&handle->loop->inotify_watchers), w);
-    uv__inotify_rm_watch(handle->loop->inotify_fd, w->wd);
-    uv__free(w);
-  }
+  maybe_free_watcher_list(w, handle->loop);
 
   return 0;
 }
test/test-queue-foreach-delete.c
@@ -60,6 +60,9 @@
 static const unsigned first_handle_number_idle = 2;
 static const unsigned first_handle_number_prepare = 2;
 static const unsigned first_handle_number_check = 2;
+#ifdef __linux__
+static const unsigned first_handle_number_fs_event = 0;
+#endif
 
 
 #define DEFINE_GLOBALS_AND_CBS(name) \
@@ -119,6 +122,45 @@ DEFINE_GLOBALS_AND_CBS(idle)
 DEFINE_GLOBALS_AND_CBS(prepare)
 DEFINE_GLOBALS_AND_CBS(check)
 
+#ifdef __linux__
+DEFINE_GLOBALS_AND_CBS(fs_event)
+
+static const char watched_dir[] = ".";
+static uv_timer_t timer;
+static unsigned helper_timer_cb_calls;
+
+
+static void init_and_start_fs_events(uv_loop_t* loop) {
+  size_t i;
+  for (i = 0; i < ARRAY_SIZE(fs_event); i++) {
+    int r;
+    r = uv_fs_event_init(loop, &fs_event[i]);
+    ASSERT(r == 0);
+
+    r = uv_fs_event_start(&fs_event[i],
+                          (uv_fs_event_cb)fs_event_cbs[i],
+                          watched_dir,
+                          0);
+    ASSERT(r == 0);
+  }
+}
+
+static void helper_timer_cb(uv_timer_t* thandle) {
+  int r;
+  uv_fs_t fs_req;
+
+  /* fire all fs_events */
+  r = uv_fs_utime(thandle->loop, &fs_req, watched_dir, 0, 0, NULL);
+  ASSERT(r == 0);
+  ASSERT(fs_req.result == 0);
+  ASSERT(fs_req.fs_type == UV_FS_UTIME);
+  ASSERT(strcmp(fs_req.path, watched_dir) == 0);
+  uv_fs_req_cleanup(&fs_req);
+
+  helper_timer_cb_calls++;
+}
+#endif
+
 
 TEST_IMPL(queue_foreach_delete) {
   uv_loop_t* loop;
@@ -130,6 +172,17 @@ TEST_IMPL(queue_foreach_delete) {
   INIT_AND_START(prepare, loop);
   INIT_AND_START(check, loop);
 
+#ifdef __linux__
+  init_and_start_fs_events(loop);
+
+  /* helper timer to trigger async and fs_event callbacks */
+  r = uv_timer_init(loop, &timer);
+  ASSERT(r == 0);
+
+  r = uv_timer_start(&timer, helper_timer_cb, 0, 0);
+  ASSERT(r == 0);
+#endif
+
   r = uv_run(loop, UV_RUN_NOWAIT);
   ASSERT(r == 1);
 
@@ -137,6 +190,10 @@ TEST_IMPL(queue_foreach_delete) {
   END_ASSERTS(prepare);
   END_ASSERTS(check);
 
+#ifdef __linux__
+  ASSERT(helper_timer_cb_calls == 1);
+#endif
+
   MAKE_VALGRIND_HAPPY();
 
   return 0;