unix: use relaxed loads/stores for feature checks
Make ThreadSanitizer stop complaining about the static variables that libuv uses to record the presence (or lack) of system calls and other kernel features.

Fixes: https://github.com/libuv/libuv/issues/2884
PR-URL: https://github.com/libuv/libuv/pull/2886
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Jameson Nash <vtjnash@gmail.com>
This commit is contained in:
parent
aeab873bbe
commit
c70dd705bc
@ -216,15 +216,23 @@ int uv__getiovmax(void) {
|
|||||||
#if defined(IOV_MAX)
|
#if defined(IOV_MAX)
|
||||||
return IOV_MAX;
|
return IOV_MAX;
|
||||||
#elif defined(_SC_IOV_MAX)
|
#elif defined(_SC_IOV_MAX)
|
||||||
static int iovmax = -1;
|
static int iovmax_cached = -1;
|
||||||
if (iovmax == -1) {
|
int iovmax;
|
||||||
iovmax = sysconf(_SC_IOV_MAX);
|
|
||||||
|
iovmax = uv__load_relaxed(&iovmax_cached);
|
||||||
|
if (iovmax != -1)
|
||||||
|
return iovmax;
|
||||||
|
|
||||||
/* On some embedded devices (arm-linux-uclibc based ip camera),
|
/* On some embedded devices (arm-linux-uclibc based ip camera),
|
||||||
* sysconf(_SC_IOV_MAX) can not get the correct value. The return
|
* sysconf(_SC_IOV_MAX) can not get the correct value. The return
|
||||||
* value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
|
* value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
|
||||||
*/
|
*/
|
||||||
if (iovmax == -1) iovmax = 1;
|
iovmax = sysconf(_SC_IOV_MAX);
|
||||||
}
|
if (iovmax == -1)
|
||||||
|
iovmax = 1;
|
||||||
|
|
||||||
|
uv__store_relaxed(&iovmax_cached, iovmax);
|
||||||
|
|
||||||
return iovmax;
|
return iovmax;
|
||||||
#else
|
#else
|
||||||
return 1024;
|
return 1024;
|
||||||
@ -658,7 +666,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
|
|||||||
int* end;
|
int* end;
|
||||||
#if defined(__linux__)
|
#if defined(__linux__)
|
||||||
static int no_msg_cmsg_cloexec;
|
static int no_msg_cmsg_cloexec;
|
||||||
if (no_msg_cmsg_cloexec == 0) {
|
if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) {
|
||||||
rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
|
rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
|
||||||
if (rc != -1)
|
if (rc != -1)
|
||||||
return rc;
|
return rc;
|
||||||
@ -667,7 +675,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
|
|||||||
rc = recvmsg(fd, msg, flags);
|
rc = recvmsg(fd, msg, flags);
|
||||||
if (rc == -1)
|
if (rc == -1)
|
||||||
return UV__ERR(errno);
|
return UV__ERR(errno);
|
||||||
no_msg_cmsg_cloexec = 1;
|
uv__store_relaxed(&no_msg_cmsg_cloexec, 1);
|
||||||
} else {
|
} else {
|
||||||
rc = recvmsg(fd, msg, flags);
|
rc = recvmsg(fd, msg, flags);
|
||||||
}
|
}
|
||||||
|
|||||||
@ -312,7 +312,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
|
|||||||
uv_once(&once, uv__mkostemp_initonce);
|
uv_once(&once, uv__mkostemp_initonce);
|
||||||
|
|
||||||
#ifdef O_CLOEXEC
|
#ifdef O_CLOEXEC
|
||||||
if (no_cloexec_support == 0 && uv__mkostemp != NULL) {
|
if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
|
||||||
r = uv__mkostemp(path, O_CLOEXEC);
|
r = uv__mkostemp(path, O_CLOEXEC);
|
||||||
|
|
||||||
if (r >= 0)
|
if (r >= 0)
|
||||||
@ -325,7 +325,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
|
|||||||
|
|
||||||
/* We set the static variable so that next calls don't even
|
/* We set the static variable so that next calls don't even
|
||||||
try to use mkostemp. */
|
try to use mkostemp. */
|
||||||
no_cloexec_support = 1;
|
uv__store_relaxed(&no_cloexec_support, 1);
|
||||||
}
|
}
|
||||||
#endif /* O_CLOEXEC */
|
#endif /* O_CLOEXEC */
|
||||||
|
|
||||||
@ -456,7 +456,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
|
|||||||
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
|
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
|
||||||
#else
|
#else
|
||||||
# if defined(__linux__)
|
# if defined(__linux__)
|
||||||
if (no_preadv) retry:
|
if (uv__load_relaxed(&no_preadv)) retry:
|
||||||
# endif
|
# endif
|
||||||
{
|
{
|
||||||
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
|
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
|
||||||
@ -468,7 +468,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
|
|||||||
req->nbufs,
|
req->nbufs,
|
||||||
req->off);
|
req->off);
|
||||||
if (result == -1 && errno == ENOSYS) {
|
if (result == -1 && errno == ENOSYS) {
|
||||||
no_preadv = 1;
|
uv__store_relaxed(&no_preadv, 1);
|
||||||
goto retry;
|
goto retry;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1351,7 +1351,7 @@ static int uv__fs_statx(int fd,
|
|||||||
int mode;
|
int mode;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (no_statx)
|
if (uv__load_relaxed(&no_statx))
|
||||||
return UV_ENOSYS;
|
return UV_ENOSYS;
|
||||||
|
|
||||||
dirfd = AT_FDCWD;
|
dirfd = AT_FDCWD;
|
||||||
@ -1384,7 +1384,7 @@ static int uv__fs_statx(int fd,
|
|||||||
* implemented, rc might return 1 with 0 set as the error code in which
|
* implemented, rc might return 1 with 0 set as the error code in which
|
||||||
* case we return ENOSYS.
|
* case we return ENOSYS.
|
||||||
*/
|
*/
|
||||||
no_statx = 1;
|
uv__store_relaxed(&no_statx, 1);
|
||||||
return UV_ENOSYS;
|
return UV_ENOSYS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -82,7 +82,7 @@ int uv__io_fork(uv_loop_t* loop) {
|
|||||||
process. So we sidestep the issue by pretending like we never
|
process. So we sidestep the issue by pretending like we never
|
||||||
started it in the first place.
|
started it in the first place.
|
||||||
*/
|
*/
|
||||||
uv__has_forked_with_cfrunloop = 1;
|
uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
|
||||||
uv__free(loop->cf_state);
|
uv__free(loop->cf_state);
|
||||||
loop->cf_state = NULL;
|
loop->cf_state = NULL;
|
||||||
}
|
}
|
||||||
@ -487,7 +487,7 @@ int uv_fs_event_start(uv_fs_event_t* handle,
|
|||||||
if (!(statbuf.st_mode & S_IFDIR))
|
if (!(statbuf.st_mode & S_IFDIR))
|
||||||
goto fallback;
|
goto fallback;
|
||||||
|
|
||||||
if (!uv__has_forked_with_cfrunloop) {
|
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
|
||||||
int r;
|
int r;
|
||||||
/* The fallback fd is no longer needed */
|
/* The fallback fd is no longer needed */
|
||||||
uv__close_nocheckstdio(fd);
|
uv__close_nocheckstdio(fd);
|
||||||
@ -522,7 +522,8 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
|
|||||||
uv__handle_stop(handle);
|
uv__handle_stop(handle);
|
||||||
|
|
||||||
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
|
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
|
||||||
if (!uv__has_forked_with_cfrunloop && handle->cf_cb != NULL)
|
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
|
||||||
|
if (handle->cf_cb != NULL)
|
||||||
r = uv__fsevents_close(handle);
|
r = uv__fsevents_close(handle);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|||||||
@ -198,8 +198,10 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
|||||||
* that being the largest value I have seen in the wild (and only once.)
|
* that being the largest value I have seen in the wild (and only once.)
|
||||||
*/
|
*/
|
||||||
static const int max_safe_timeout = 1789569;
|
static const int max_safe_timeout = 1789569;
|
||||||
static int no_epoll_pwait;
|
static int no_epoll_pwait_cached;
|
||||||
static int no_epoll_wait;
|
static int no_epoll_wait_cached;
|
||||||
|
int no_epoll_pwait;
|
||||||
|
int no_epoll_wait;
|
||||||
struct epoll_event events[1024];
|
struct epoll_event events[1024];
|
||||||
struct epoll_event* pe;
|
struct epoll_event* pe;
|
||||||
struct epoll_event e;
|
struct epoll_event e;
|
||||||
@ -271,6 +273,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
|||||||
count = 48; /* Benchmarks suggest this gives the best throughput. */
|
count = 48; /* Benchmarks suggest this gives the best throughput. */
|
||||||
real_timeout = timeout;
|
real_timeout = timeout;
|
||||||
|
|
||||||
|
/* You could argue there is a dependency between these two but
|
||||||
|
* ultimately we don't care about their ordering with respect
|
||||||
|
* to one another. Worst case, we make a few system calls that
|
||||||
|
* could have been avoided because another thread already knows
|
||||||
|
* they fail with ENOSYS. Hardly the end of the world.
|
||||||
|
*/
|
||||||
|
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
|
||||||
|
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
/* See the comment for max_safe_timeout for an explanation of why
|
/* See the comment for max_safe_timeout for an explanation of why
|
||||||
* this is necessary. Executive summary: kernel bug workaround.
|
* this is necessary. Executive summary: kernel bug workaround.
|
||||||
@ -288,16 +299,20 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
|||||||
ARRAY_SIZE(events),
|
ARRAY_SIZE(events),
|
||||||
timeout,
|
timeout,
|
||||||
&sigset);
|
&sigset);
|
||||||
if (nfds == -1 && errno == ENOSYS)
|
if (nfds == -1 && errno == ENOSYS) {
|
||||||
|
uv__store_relaxed(&no_epoll_pwait_cached, 1);
|
||||||
no_epoll_pwait = 1;
|
no_epoll_pwait = 1;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
nfds = epoll_wait(loop->backend_fd,
|
nfds = epoll_wait(loop->backend_fd,
|
||||||
events,
|
events,
|
||||||
ARRAY_SIZE(events),
|
ARRAY_SIZE(events),
|
||||||
timeout);
|
timeout);
|
||||||
if (nfds == -1 && errno == ENOSYS)
|
if (nfds == -1 && errno == ENOSYS) {
|
||||||
|
uv__store_relaxed(&no_epoll_wait_cached, 1);
|
||||||
no_epoll_wait = 1;
|
no_epoll_wait = 1;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (sigmask != 0 && no_epoll_pwait != 0)
|
if (sigmask != 0 && no_epoll_pwait != 0)
|
||||||
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
|
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
|
||||||
|
|||||||
@ -33,7 +33,6 @@
|
|||||||
#pragma linkage(BPX4CTW, OS)
|
#pragma linkage(BPX4CTW, OS)
|
||||||
#pragma linkage(BPX1CTW, OS)
|
#pragma linkage(BPX1CTW, OS)
|
||||||
|
|
||||||
static int number_of_epolls;
|
|
||||||
static QUEUE global_epoll_queue;
|
static QUEUE global_epoll_queue;
|
||||||
static uv_mutex_t global_epoll_lock;
|
static uv_mutex_t global_epoll_lock;
|
||||||
static uv_once_t once = UV_ONCE_INIT;
|
static uv_once_t once = UV_ONCE_INIT;
|
||||||
|
|||||||
@ -30,6 +30,8 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
/* Android versions < 4.1 have a broken pthread_sigmask. */
|
/* Android versions < 4.1 have a broken pthread_sigmask. */
|
||||||
|
#include "uv-common.h"
|
||||||
|
|
||||||
#include <errno.h>
|
#include <errno.h>
|
||||||
#include <pthread.h>
|
#include <pthread.h>
|
||||||
#include <signal.h>
|
#include <signal.h>
|
||||||
@ -38,13 +40,13 @@ int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
|
|||||||
static int workaround;
|
static int workaround;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (workaround) {
|
if (uv__load_relaxed(&workaround)) {
|
||||||
return sigprocmask(how, set, oset);
|
return sigprocmask(how, set, oset);
|
||||||
} else {
|
} else {
|
||||||
err = pthread_sigmask(how, set, oset);
|
err = pthread_sigmask(how, set, oset);
|
||||||
if (err) {
|
if (err) {
|
||||||
if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
|
if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
|
||||||
workaround = 1;
|
uv__store_relaxed(&workaround, 1);
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
return -1;
|
return -1;
|
||||||
|
|||||||
@ -326,16 +326,19 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
|
|||||||
|
|
||||||
|
|
||||||
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
|
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
|
||||||
static int single_accept = -1;
|
static int single_accept_cached = -1;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
int single_accept;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (tcp->delayed_error)
|
if (tcp->delayed_error)
|
||||||
return tcp->delayed_error;
|
return tcp->delayed_error;
|
||||||
|
|
||||||
|
single_accept = uv__load_relaxed(&single_accept_cached);
|
||||||
if (single_accept == -1) {
|
if (single_accept == -1) {
|
||||||
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
|
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
|
||||||
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
|
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
|
||||||
|
uv__store_relaxed(&single_accept_cached, single_accept);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (single_accept)
|
if (single_accept)
|
||||||
|
|||||||
@ -859,11 +859,11 @@ __attribute__((destructor))
|
|||||||
void uv_library_shutdown(void) {
|
void uv_library_shutdown(void) {
|
||||||
static int was_shutdown;
|
static int was_shutdown;
|
||||||
|
|
||||||
if (was_shutdown)
|
if (uv__load_relaxed(&was_shutdown))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
uv__process_title_cleanup();
|
uv__process_title_cleanup();
|
||||||
uv__signal_cleanup();
|
uv__signal_cleanup();
|
||||||
uv__threadpool_cleanup();
|
uv__threadpool_cleanup();
|
||||||
was_shutdown = 1;
|
uv__store_relaxed(&was_shutdown, 1);
|
||||||
}
|
}
|
||||||
|
|||||||
@ -60,6 +60,14 @@ extern int snprintf(char*, size_t, const char*, ...);
|
|||||||
#define STATIC_ASSERT(expr) \
|
#define STATIC_ASSERT(expr) \
|
||||||
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
|
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
|
||||||
|
|
||||||
|
#ifdef __GNUC__
|
||||||
|
#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED)
|
||||||
|
#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
|
||||||
|
#else
|
||||||
|
#define uv__load_relaxed(p) (*p)
|
||||||
|
#define uv__store_relaxed(p, v) do *p = v; while (0)
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Handle flags. Some flags are specific to Windows or UNIX. */
|
/* Handle flags. Some flags are specific to Windows or UNIX. */
|
||||||
enum {
|
enum {
|
||||||
/* Used by all handles. */
|
/* Used by all handles. */
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user