From f9ad802fa5dd5afe6730f8e00cfdbf98f1d7a969 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20Storsj=C3=B6?=
Date: Wed, 14 Jul 2021 19:36:30 +0300
Subject: [PATCH] mingw: fix building for ARM/AArch64

Don't use x86 inline assembly in these cases, but fall back to
__sync_fetch_and_or, similar to _InterlockedOr8 in the MSVC case.

This corresponds to what is done in src/unix/atomic-ops.h, where
ARM/AArch64 cases end up implementing cmpxchgi with
__sync_val_compare_and_swap.

PR-URL: https://github.com/libuv/libuv/pull/3236
Reviewed-By: Jameson Nash
---
 src/win/atomicops-inl.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/win/atomicops-inl.h b/src/win/atomicops-inl.h
index 52713cf3..2f984c6d 100644
--- a/src/win/atomicops-inl.h
+++ b/src/win/atomicops-inl.h
@@ -39,10 +39,11 @@ static char INLINE uv__atomic_exchange_set(char volatile* target) {
   return _InterlockedOr8(target, 1);
 }
 
-#else /* GCC */
+#else /* GCC, Clang in mingw mode */
 
-/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
 static inline char uv__atomic_exchange_set(char volatile* target) {
+#if defined(__i386__) || defined(__x86_64__)
+  /* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
   const char one = 1;
   char old_value;
   __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
@@ -50,6 +51,9 @@ static inline char uv__atomic_exchange_set(char volatile* target) {
                         : "0"(one), "m"(*target)
                         : "memory");
   return old_value;
+#else
+  return __sync_fetch_and_or(target, 1);
+#endif
 }
 
 #endif
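
Note on the fallback: the function's only job is to atomically set *target
to 1 and return the previous value. On x86, "lock xchgb" swaps in the
constant 1 and hands back the old byte; since the byte only ever holds 0 or
1, __sync_fetch_and_or(target, 1) yields the same old value and the same
final state, so it is a drop-in replacement on ARM/AArch64. Below is a
minimal standalone sketch of that fallback path, assuming a GCC-compatible
compiler with the legacy __sync builtins; the helper name is illustrative,
not libuv's:

  /* sketch.c -- illustrative only, not part of the patch */
  #include <stdio.h>

  static char volatile guard = 0;

  /* Same shape as the non-x86 branch the patch adds: atomically set
   * the byte to 1 and return what it held before. */
  static inline char atomic_exchange_set_fallback(char volatile* target) {
    return __sync_fetch_and_or(target, 1);
  }

  int main(void) {
    /* The first caller observes 0 and "wins"; later callers observe 1. */
    printf("first call:  %d\n", atomic_exchange_set_fallback(&guard));
    printf("second call: %d\n", atomic_exchange_set_fallback(&guard));
    return 0;
  }

__sync_lock_test_and_set would be the more literal exchange, but GCC only
documents it as an acquire barrier, whereas __sync_fetch_and_or is a full
barrier like the locked x86 exchange, matching the _InterlockedOr8 choice
on the MSVC side.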