unix: switch to c11 atomics (#3688)

Fixes: https://github.com/libuv/libuv/issues/3683
Ben Noordhuis, 2022-07-19 10:09:09 +02:00 (committed via GitHub)
parent fb85db1d3e
commit ac5180e29c
5 changed files with 48 additions and 134 deletions
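
A note on the mechanics (not part of the commit): the old cmpxchgi(ptr, oldval, newval) helper returned whatever value it observed at ptr, while C11's atomic_compare_exchange_strong(ptr, &expected, desired) returns a bool and, on failure, writes the observed value back into expected. That write-back is why the diff below pairs each call with an "expected = ...;" assignment, and why uv__async_spin can simply return expected. A minimal sketch of the correspondence, using a hypothetical helper name:

    #include <stdatomic.h>

    /* Sketch only: reproduces cmpxchgi's return-the-observed-value
     * semantics on top of C11 atomics. Not libuv API. */
    static int compat_cmpxchgi(_Atomic int* ptr, int oldval, int newval) {
      int expected = oldval;
      /* On success, expected keeps oldval; on failure, it receives the
       * value actually seen at ptr. Either way, it is exactly what the
       * old helper returned. */
      atomic_compare_exchange_strong(ptr, &expected, newval);
      return expected;
    }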

Makefile.am

@@ -96,7 +96,6 @@ else # WINNT
 uvinclude_HEADERS += include/uv/unix.h
 AM_CPPFLAGS += -I$(top_srcdir)/src/unix
 libuv_la_SOURCES += src/unix/async.c \
-                    src/unix/atomic-ops.h \
                     src/unix/core.c \
                     src/unix/dl.c \
                     src/unix/fs.c \
@@ -110,7 +109,6 @@ libuv_la_SOURCES += src/unix/async.c \
                     src/unix/process.c \
                     src/unix/random-devurandom.c \
                     src/unix/signal.c \
-                    src/unix/spinlock.h \
                     src/unix/stream.c \
                     src/unix/tcp.c \
                     src/unix/thread.c \

src/unix/async.c

@@ -24,9 +24,9 @@
 #include "uv.h"
 #include "internal.h"
-#include "atomic-ops.h"
 
 #include <errno.h>
+#include <stdatomic.h>
 #include <stdio.h>  /* snprintf() */
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
@@ -40,6 +40,7 @@
 
 static void uv__async_send(uv_loop_t* loop);
 static int uv__async_start(uv_loop_t* loop);
+static void uv__cpu_relax(void);
 
 
 int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
@@ -61,19 +62,26 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
 
 
 int uv_async_send(uv_async_t* handle) {
+  _Atomic int* pending;
+  int expected;
+
+  pending = (_Atomic int*) &handle->pending;
+
   /* Do a cheap read first. */
-  if (ACCESS_ONCE(int, handle->pending) != 0)
+  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
     return 0;
 
   /* Tell the other thread we're busy with the handle. */
-  if (cmpxchgi(&handle->pending, 0, 1) != 0)
+  expected = 0;
+  if (!atomic_compare_exchange_strong(pending, &expected, 1))
     return 0;
 
   /* Wake up the other thread's event loop. */
   uv__async_send(handle->loop);
 
   /* Tell the other thread we're done. */
-  if (cmpxchgi(&handle->pending, 1, 2) != 1)
+  expected = 1;
+  if (!atomic_compare_exchange_strong(pending, &expected, 2))
     abort();
 
   return 0;
@@ -82,8 +90,11 @@ int uv_async_send(uv_async_t* handle) {
 
 /* Only call this from the event loop thread. */
 static int uv__async_spin(uv_async_t* handle) {
+  _Atomic int* pending;
+  int expected;
   int i;
-  int rc;
+
+  pending = (_Atomic int*) &handle->pending;
 
   for (;;) {
     /* 997 is not completely chosen at random. It's a prime number, acyclical
@@ -94,13 +105,14 @@ static int uv__async_spin(uv_async_t* handle) {
       * rc=1 -- handle is pending, other thread is still working with it.
       * rc=2 -- handle is pending, other thread is done.
       */
-      rc = cmpxchgi(&handle->pending, 2, 0);
+      expected = 2;
+      atomic_compare_exchange_strong(pending, &expected, 0);
 
-      if (rc != 1)
-        return rc;
+      if (expected != 1)
+        return expected;
 
       /* Other thread is busy with this handle, spin until it's done. */
-      cpu_relax();
+      uv__cpu_relax();
     }
 
     /* Yield the CPU. We may have preempted the other thread while it's
@@ -251,3 +263,16 @@ void uv__async_stop(uv_loop_t* loop) {
   uv__close(loop->async_io_watcher.fd);
   loop->async_io_watcher.fd = -1;
 }
+
+
+static void uv__cpu_relax(void) {
+#if defined(__i386__) || defined(__x86_64__)
+  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
+#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
+  __asm__ __volatile__ ("yield" ::: "memory");
+#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
+  __asm volatile ("" : : : "memory");
+#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
+  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
+#endif
+}
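
Aside (not from the commit itself): ACCESS_ONCE(int, x) was a volatile read; its C11 analogue is the relaxed load now used for the cheap fast-path check. It compiles to a plain load on mainstream targets but, unlike the volatile cast, is data-race-free under the C11 memory model. Ordering still comes from the seq_cst compare-exchange that follows it. A sketch:

    #include <stdatomic.h>

    /* Cheap peek at a flag another thread may be writing. A stale answer
     * is acceptable here, so no ordering is requested. */
    static int cheap_peek(const _Atomic int* p) {
      return atomic_load_explicit(p, memory_order_relaxed);
    }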

src/unix/atomic-ops.h (deleted)

@@ -1,64 +0,0 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef UV_ATOMIC_OPS_H_
-#define UV_ATOMIC_OPS_H_
-
-#include "internal.h"  /* UV_UNUSED */
-
-#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-#include <atomic.h>
-#endif
-
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
-UV_UNUSED(static void cpu_relax(void));
-
-/* Prefer hand-rolled assembly over the gcc builtins because the latter also
- * issue full memory barriers.
- */
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
-#if defined(__i386__) || defined(__x86_64__)
-  int out;
-  __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
-                        : "=a" (out), "+m" (*(volatile int*) ptr)
-                        : "r" (newval), "0" (oldval)
-                        : "memory");
-  return out;
-#elif defined(__MVS__)
-  /* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
-   * a runtime bug.
-   */
-  __asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
-  return oldval;
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-  return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
-#else
-  return __sync_val_compare_and_swap(ptr, oldval, newval);
-#endif
-}
-
-UV_UNUSED(static void cpu_relax(void)) {
-#if defined(__i386__) || defined(__x86_64__)
-  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
-#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
-  __asm__ __volatile__ ("yield" ::: "memory");
-#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
-  __asm volatile ("" : : : "memory");
-#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
-  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
-#endif
-}
-
-#endif  /* UV_ATOMIC_OPS_H_ */
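
Worth noting (not part of the commit): this header hand-rolled assembly to avoid the full barriers issued by the __sync builtins. C11 addresses the same concern portably by letting callers request a weaker ordering explicitly; the commit itself just accepts the seq_cst defaults. If weaker ordering were ever wanted, it would look roughly like this (sketch, not libuv API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool try_mark_busy(_Atomic int* pending) {
      int expected = 0;
      return atomic_compare_exchange_strong_explicit(
          pending, &expected, 1,
          memory_order_acq_rel,    /* ordering on success */
          memory_order_relaxed);   /* ordering on failure */
    }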

src/unix/spinlock.h (deleted)

@@ -1,53 +0,0 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef UV_SPINLOCK_H_
-#define UV_SPINLOCK_H_
-
-#include "internal.h"  /* ACCESS_ONCE, UV_UNUSED */
-#include "atomic-ops.h"
-
-#define UV_SPINLOCK_INITIALIZER { 0 }
-
-typedef struct {
-  int lock;
-} uv_spinlock_t;
-
-UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
-UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
-UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
-UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
-
-UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
-  ACCESS_ONCE(int, spinlock->lock) = 0;
-}
-
-UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
-  while (!uv_spinlock_trylock(spinlock)) cpu_relax();
-}
-
-UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
-  ACCESS_ONCE(int, spinlock->lock) = 0;
-}
-
-UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
-  /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
-   * Not really critical until we have locks that are (frequently) contended
-   * for by several threads.
-   */
-  return 0 == cmpxchgi(&spinlock->lock, 0, 1);
-}
-
-#endif  /* UV_SPINLOCK_H_ */
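
The deleted spinlock gets no one-to-one replacement; src/unix/tty.c below open-codes it with an _Atomic int. For reference, the same trylock/lock/unlock surface expressed with C11 atomics might look like this (sketch; the names are illustrative, not libuv API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool spin_trylock(_Atomic int* lock) {
      int expected = 0;  /* CAS clobbers this on failure, so keep it local */
      return atomic_compare_exchange_strong(lock, &expected, 1);
    }

    static void spin_lock(_Atomic int* lock) {
      while (!spin_trylock(lock))
        ;  /* a production version would pause/yield here, as cpu_relax() did */
    }

    static void spin_unlock(_Atomic int* lock) {
      atomic_store(lock, 0);  /* the default seq_cst store provides the release */
    }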

src/unix/tty.c

@@ -21,8 +21,8 @@
 #include "uv.h"
 #include "internal.h"
-#include "spinlock.h"
 
+#include <stdatomic.h>
 #include <stdlib.h>
 #include <assert.h>
 #include <unistd.h>
 #include <termios.h>
@@ -64,7 +64,7 @@ static int isreallyatty(int file) {
 
 static int orig_termios_fd = -1;
 static struct termios orig_termios;
-static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
+static _Atomic int termios_spinlock;
 
 int uv__tcsetattr(int fd, int how, const struct termios *term) {
   int rc;
@@ -280,6 +280,7 @@ static void uv__tty_make_raw(struct termios* tio) {
 
 
 int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
   struct termios tmp;
+  int expected;
   int fd;
   int rc;
@@ -296,12 +297,16 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
       return UV__ERR(errno);
 
     /* This is used for uv_tty_reset_mode() */
-    uv_spinlock_lock(&termios_spinlock);
+    do
+      expected = 0;
+    while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));
+
     if (orig_termios_fd == -1) {
       orig_termios = tty->orig_termios;
       orig_termios_fd = fd;
     }
-    uv_spinlock_unlock(&termios_spinlock);
+
+    atomic_store(&termios_spinlock, 0);
   }
 
   tmp = tty->orig_termios;
@@ -442,17 +447,20 @@ uv_handle_type uv_guess_handle(uv_file file) {
  */
 int uv_tty_reset_mode(void) {
   int saved_errno;
+  int expected;
   int err;
 
   saved_errno = errno;
-  if (!uv_spinlock_trylock(&termios_spinlock))
+
+  expected = 0;
+  if (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1))
     return UV_EBUSY;  /* In uv_tty_set_mode(). */
 
   err = 0;
   if (orig_termios_fd != -1)
     err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
 
-  uv_spinlock_unlock(&termios_spinlock);
+  atomic_store(&termios_spinlock, 0);
   errno = saved_errno;
 
   return err;
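
One last aside (not from the commit): because atomic_compare_exchange_strong writes the observed value into expected on failure, the "expected = 0;" reset inside the do/while above is essential. Without it, a second iteration would compare against the observed 1 and succeed while the other thread still holds the lock, breaking mutual exclusion. And since the acquire path already retries in a loop, the weak variant, which may fail spuriously but can compile to cheaper code on LL/SC targets such as ARM, would also be correct (sketch, not libuv code):

    #include <stdatomic.h>

    static void spin_acquire(_Atomic int* lock) {
      int expected;
      do
        expected = 0;  /* must be re-armed on every attempt */
      while (!atomic_compare_exchange_weak(lock, &expected, 1));
    }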