Merge branch 'v1.x' into HEAD

This commit is contained in:
Steven French 2024-04-11 23:17:33 +12:00
commit 08f65d7a99
28 changed files with 761 additions and 295 deletions

View File

@ -6,6 +6,7 @@ on:
- '**'
- '!docs/**'
- '!.**'
- 'docs/code/**'
- '.github/workflows/CI-sample.yml'
push:
branches:

View File

@ -23,6 +23,11 @@ jobs:
- name: Envinfo
run: npx envinfo
# [AM]SAN fail on newer kernels due to a bigger PIE slide
- name: Disable ASLR
run: |
sudo sysctl -w kernel.randomize_va_space=0
- name: ASAN Build
run: |
mkdir build-asan

View File

@ -566,6 +566,7 @@ if(LIBUV_BUILD_TESTS)
test/test-hrtime.c
test/test-idle.c
test/test-idna.c
test/test-iouring-pollhup.c
test/test-ip4-addr.c
test/test-ip6-addr.c
test/test-ip-name.c

View File

@ -911,7 +911,7 @@ Changes since version 1.41.0:
* zos: treat __rfim_utok as binary (Shuowang (Wayne) Zhang)
* zos: use execvpe() to set environ explictly (Shuowang (Wayne) Zhang)
* zos: use execvpe() to set environ explicitly (Shuowang (Wayne) Zhang)
* zos: use custom proctitle implementation (Shuowang (Wayne) Zhang)
@ -3417,7 +3417,7 @@ Changes since version 1.9.1:
* zos: implement uv__io_check_fd (John Barboza)
* unix: unneccessary use const qualifier in container_of (John Barboza)
* unix: unnecessary use const qualifier in container_of (John Barboza)
* win,tty: add support for ANSI codes in win10 v1511 (Imran Iqbal)
@ -5520,7 +5520,7 @@ Changes since version 0.11.8:
is an int64_t, and no longer an int. (Bert Belder)
* process: make uv_spawn() return some types of errors immediately on windows,
instead of passing the error code the the exit callback. This brings it on
instead of passing the error code the exit callback. This brings it on
par with libuv's behavior on unix. (Bert Belder)

View File

@ -198,6 +198,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-hrtime.c \
test/test-idle.c \
test/test-idna.c \
test/test-iouring-pollhup.c \
test/test-ip4-addr.c \
test/test-ip6-addr.c \
test/test-ip-name.c \

View File

@ -5,25 +5,27 @@
uv_loop_t *loop;
uv_tty_t tty;
int main() {
loop = uv_default_loop();
int main() {
uv_write_t req;
uv_buf_t buf;
uv_write_t req1;
uv_buf_t buf1;
loop = uv_default_loop();
uv_tty_init(loop, &tty, STDOUT_FILENO, 0);
uv_tty_set_mode(&tty, UV_TTY_MODE_NORMAL);
if (uv_guess_handle(1) == UV_TTY) {
uv_write_t req;
uv_buf_t buf;
buf.base = "\033[41;37m";
buf.len = strlen(buf.base);
uv_write(&req, (uv_stream_t*) &tty, &buf, 1, NULL);
buf1.base = "\033[41;37m";
buf1.len = strlen(buf1.base);
uv_write(&req1, (uv_stream_t*) &tty, &buf1, 1, NULL);
}
uv_write_t req;
uv_buf_t buf;
buf.base = "Hello TTY\n";
buf.len = strlen(buf.base);
uv_write(&req, (uv_stream_t*) &tty, &buf, 1, NULL);
uv_tty_reset_mode();
return uv_run(loop, UV_RUN_DEFAULT);
}

View File

@ -65,6 +65,10 @@ API
at the end of this procedure, then the handle is destroyed with a
``UV_ETIMEDOUT`` error passed to the corresponding callback.
If `delay` is less than 1 then ``UV_EINVAL`` is returned.
.. versionchanged:: 1.49.0 If `delay` is less than 1 then ``UV_EINVAL`` is returned.
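A minimal caller-side sketch of the stricter check, assuming an initialized loop and handle (names are illustrative); this mirrors the updated tcp_flags test later in this commit:

  uv_tcp_t handle;
  uv_tcp_init_ex(loop, &handle, AF_INET);  /* _ex so the socket exists */
  assert(0 == uv_tcp_keepalive(&handle, 1, 60));        /* 60s idle delay */
  assert(UV_EINVAL == uv_tcp_keepalive(&handle, 1, 0)); /* rejected since 1.49.0 */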
.. c:function:: int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable)
Enable / disable simultaneous asynchronous accept requests that are

View File

@ -285,7 +285,9 @@ API
local sockets.
:param handle: UDP handle. Should have been initialized with
:c:func:`uv_udp_init`.
:c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have
been bound to an address explicitly with :c:func:`uv_udp_bind`, or
implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`.
:param on: 1 for on, 0 for off.
@ -296,7 +298,9 @@ API
Set the multicast ttl.
:param handle: UDP handle. Should have been initialized with
:c:func:`uv_udp_init`.
:c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have
been bound to an address explicitly with :c:func:`uv_udp_bind`, or
implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`.
:param ttl: 1 through 255.
@ -307,7 +311,9 @@ API
Set the multicast interface to send or receive data on.
:param handle: UDP handle. Should have been initialized with
:c:func:`uv_udp_init`.
:c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have
been bound to an address explicitly with :c:func:`uv_udp_bind`, or
implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`.
:param interface_addr: interface address.
@ -318,7 +324,9 @@ API
Set broadcast on or off.
:param handle: UDP handle. Should have been initialized with
:c:func:`uv_udp_init`.
:c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have
been bound to an address explicitly with :c:func:`uv_udp_bind`, or
implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`.
:param on: 1 for on, 0 for off.
@ -329,7 +337,9 @@ API
Set the time to live.
:param handle: UDP handle. Should have been initialized with
:c:func:`uv_udp_init`.
:c:func:`uv_udp_init_ex` as either ``AF_INET`` or ``AF_INET6``, or have
been bound to an address explicitly with :c:func:`uv_udp_bind`, or
implicitly with :c:func:`uv_udp_send()` or :c:func:`uv_udp_recv_start`.
:param ttl: 1 through 255.
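A short sketch of the precondition spelled out above, assuming the default loop; the handle gets its address family either explicitly via uv_udp_init_ex() or implicitly by binding first:

  uv_udp_t handle;
  uv_udp_init_ex(uv_default_loop(), &handle, AF_INET); /* family fixed up front */
  uv_udp_set_multicast_ttl(&handle, 32);               /* now valid: 1 through 255 */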

View File

@ -290,8 +290,8 @@ typedef struct {
#define UV_ONCE_INIT { 0, NULL }
typedef struct uv_once_s {
unsigned char ran;
HANDLE event;
unsigned char unused;
INIT_ONCE init_once;
} uv_once_t;
/* Platform-specific definitions for uv_spawn support. */

View File

@ -1869,6 +1869,8 @@ unsigned int uv_available_parallelism(void) {
#ifdef __linux__
cpu_set_t set;
long rc;
double rc_with_cgroup;
uv__cpu_constraint c = {0, 0, 0.0};
memset(&set, 0, sizeof(set));
@ -1880,8 +1882,13 @@ unsigned int uv_available_parallelism(void) {
rc = CPU_COUNT(&set);
else
rc = sysconf(_SC_NPROCESSORS_ONLN);
if (rc < 1)
if (uv__get_constrained_cpu(&c) == 0 && c.period_length > 0) {
rc_with_cgroup = (double)c.quota_per_period / c.period_length * c.proportions;
if (rc_with_cgroup < rc)
rc = (long)rc_with_cgroup; /* Casting is safe since rc_with_cgroup < rc < LONG_MAX */
}
if (rc < 1)
rc = 1;
return (unsigned) rc;
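As a worked example of the clamp above, with hypothetical cgroup numbers: a quota of 200000us per 100000us period and the default weight give 200000 / 100000 * 1.0 = 2, so on an 8-core machine the function now reports 2 instead of 8. Nothing changes for callers:

  unsigned int n = uv_available_parallelism(); /* >= 1, now cgroup-aware on Linux */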

View File

@ -36,9 +36,45 @@ int uv_uptime(double* uptime) {
}
int uv_resident_set_memory(size_t* rss) {
/* FIXME: read /proc/meminfo? */
*rss = 0;
char buf[1024];
const char* s;
long val;
int rc;
int i;
struct sysinfo si;
/* rss: 24th element */
rc = uv__slurp("/proc/self/stat", buf, sizeof(buf));
if (rc < 0)
return rc;
/* find the last ')' */
s = strrchr(buf, ')');
if (s == NULL)
goto err;
for (i = 1; i <= 22; i++) {
s = strchr(s + 1, ' ');
if (s == NULL)
goto err;
}
errno = 0;
val = strtol(s, NULL, 10);
if (val < 0 || errno != 0)
goto err;
do
rc = sysinfo(&si);
while (rc == -1 && errno == EINTR);
if (rc == -1)
return UV__ERR(errno);
*rss = val * si.mem_unit;
return 0;
err:
return UV_EINVAL;
}
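A minimal usage sketch of the public wrapper this implements; the value is the 24th /proc/self/stat field scaled by the sysinfo mem_unit:

  size_t rss;
  if (uv_resident_set_memory(&rss) == 0)
    /* rss now holds the resident set size in bytes */;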
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {

View File

@ -31,6 +31,7 @@
#include <errno.h>
#include <dlfcn.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -82,17 +83,6 @@
# include <sys/statfs.h>
#endif
#if defined(__CYGWIN__) || \
(defined(__HAIKU__) && B_HAIKU_VERSION < B_HAIKU_VERSION_1_PRE_BETA_5) || \
(defined(__sun) && !defined(__illumos__)) || \
(defined(__APPLE__) && !TARGET_OS_IPHONE && \
MAC_OS_X_VERSION_MIN_REQUIRED < 110000)
#define preadv(fd, bufs, nbufs, off) \
pread(fd, (bufs)->iov_base, (bufs)->iov_len, off)
#define pwritev(fd, bufs, nbufs, off) \
pwrite(fd, (bufs)->iov_base, (bufs)->iov_len, off)
#endif
#if defined(_AIX) && _XOPEN_SOURCE <= 600
extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
#endif
@ -406,6 +396,115 @@ static ssize_t uv__fs_open(uv_fs_t* req) {
}
static ssize_t uv__preadv_or_pwritev_emul(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
int is_pread) {
ssize_t total;
ssize_t r;
size_t i;
size_t n;
void* p;
total = 0;
for (i = 0; i < (size_t) nbufs; i++) {
p = bufs[i].iov_base;
n = bufs[i].iov_len;
do
if (is_pread)
r = pread(fd, p, n, off);
else
r = pwrite(fd, p, n, off);
while (r == -1 && errno == EINTR);
if (r == -1) {
if (total > 0)
return total;
return -1;
}
off += r;
total += r;
if ((size_t) r < n)
return total;
}
return total;
}
#ifdef __linux__
typedef int uv__iovcnt;
#else
typedef size_t uv__iovcnt;
#endif
static ssize_t uv__preadv_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
}
static ssize_t uv__pwritev_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
}
/* The function pointer cache is an uintptr_t because _Atomic void*
* doesn't work on macos/ios/etc...
*/
static ssize_t uv__preadv_or_pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
_Atomic uintptr_t* cache,
int is_pread) {
ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
void* p;
p = (void*) atomic_load_explicit(cache, memory_order_relaxed);
if (p == NULL) {
#ifdef RTLD_DEFAULT
p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev");
dlerror(); /* Clear errors. */
#endif /* RTLD_DEFAULT */
if (p == NULL)
p = is_pread ? uv__preadv_emul : uv__pwritev_emul;
atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed);
}
f = p;
return f(fd, bufs, nbufs, off);
}
static ssize_t uv__preadv(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off) {
static _Atomic uintptr_t cache;
return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/1);
}
static ssize_t uv__pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off) {
static _Atomic uintptr_t cache;
return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
}
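A hedged sketch of how the wrappers above are used, with hypothetical fd and buffers; the first call resolves the real preadv() via dlsym() (or falls back to the emulation loop) and every later call reuses the cached function pointer:

  struct iovec iov[2];
  iov[0].iov_base = buf0; iov[0].iov_len = sizeof(buf0); /* buf0, buf1 and fd */
  iov[1].iov_base = buf1; iov[1].iov_len = sizeof(buf1); /* are hypothetical  */
  ssize_t n = uv__preadv(fd, iov, 2, 0);  /* scatter read at offset 0 */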
static ssize_t uv__fs_read(uv_fs_t* req) {
const struct iovec* bufs;
unsigned int iovmax;
@ -433,7 +532,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
if (nbufs == 1)
r = pread(fd, bufs->iov_base, bufs->iov_len, off);
else if (nbufs > 1)
r = preadv(fd, bufs, nbufs, off);
r = uv__preadv(fd, bufs, nbufs, off);
}
#ifdef __PASE__
@ -691,14 +790,23 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
static ssize_t uv__fs_realpath(uv_fs_t* req) {
char* buf;
char* tmp;
#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
buf = realpath(req->path, NULL);
if (buf == NULL)
tmp = realpath(req->path, NULL);
if (tmp == NULL)
return -1;
buf = uv__strdup(tmp);
free(tmp); /* _Not_ uv__free. */
if (buf == NULL) {
errno = ENOMEM;
return -1;
}
#else
ssize_t len;
(void)tmp;
len = uv__fs_pathmax_size(req->path);
buf = uv__malloc(len + 1);
@ -1112,7 +1220,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
if (nbufs == 1)
r = pwrite(fd, bufs->iov_base, bufs->iov_len, off);
else if (nbufs > 1)
r = pwritev(fd, bufs, nbufs, off);
r = uv__pwritev(fd, bufs, nbufs, off);
}
return r;

View File

@ -793,6 +793,7 @@ int uv__cf_loop_signal(uv_loop_t* loop,
/* Runs in UV loop to initialize handle */
int uv__fsevents_init(uv_fs_event_t* handle) {
char* buf;
int err;
uv__cf_loop_state_t* state;
@ -801,9 +802,13 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
return err;
/* Get absolute path to file */
handle->realpath = realpath(handle->path, NULL);
if (handle->realpath == NULL)
buf = realpath(handle->path, NULL);
if (buf == NULL)
return UV__ERR(errno);
handle->realpath = uv__strdup(buf);
free(buf); /* _Not_ uv__free. */
if (handle->realpath == NULL)
return UV_ENOMEM;
handle->realpath_len = strlen(handle->realpath);
/* Initialize event queue */

View File

@ -472,4 +472,14 @@ uv__fs_copy_file_range(int fd_in,
#define UV__CPU_AFFINITY_SUPPORTED 0
#endif
#ifdef __linux__
typedef struct {
long long quota_per_period;
long long period_length;
double proportions;
} uv__cpu_constraint;
int uv__get_constrained_cpu(uv__cpu_constraint* constraint);
#endif
#endif /* UV_UNIX_INTERNAL_H_ */

View File

@ -712,23 +712,17 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* Perform EPOLL_CTL_DEL immediately instead of going through
* io_uring's submit queue, otherwise the file descriptor may
* be closed by the time the kernel starts the operation.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*
* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
if (inv == NULL) {
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
} else {
uv__epoll_ctl_prep(loop->backend_fd,
&lfields->ctl,
inv->prep,
EPOLL_CTL_DEL,
fd,
&dummy);
}
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
@ -1215,6 +1209,10 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
}
/* Only for EPOLL_CTL_ADD and EPOLL_CTL_MOD. EPOLL_CTL_DEL should always be
* executed immediately, otherwise the file descriptor may have been closed
* by the time the kernel starts the operation.
*/
static void uv__epoll_ctl_prep(int epollfd,
struct uv__iou* ctl,
struct epoll_event (*events)[256],
@ -1226,45 +1224,28 @@ static void uv__epoll_ctl_prep(int epollfd,
uint32_t mask;
uint32_t slot;
if (ctl->ringfd == -1) {
if (!epoll_ctl(epollfd, op, fd, e))
return;
assert(op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD);
assert(ctl->ringfd != -1);
if (op == EPOLL_CTL_DEL)
return; /* Ignore errors, may be racing with another thread. */
mask = ctl->sqmask;
slot = (*ctl->sqtail)++ & mask;
if (op != EPOLL_CTL_ADD)
abort();
pe = &(*events)[slot];
*pe = *e;
if (errno != EEXIST)
abort();
sqe = ctl->sqe;
sqe = &sqe[slot];
/* File descriptor that's been watched before, update event mask. */
if (!epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, e))
return;
memset(sqe, 0, sizeof(*sqe));
sqe->addr = (uintptr_t) pe;
sqe->fd = epollfd;
sqe->len = op;
sqe->off = fd;
sqe->opcode = UV__IORING_OP_EPOLL_CTL;
sqe->user_data = op | slot << 2 | (int64_t) fd << 32;
abort();
} else {
mask = ctl->sqmask;
slot = (*ctl->sqtail)++ & mask;
pe = &(*events)[slot];
*pe = *e;
sqe = ctl->sqe;
sqe = &sqe[slot];
memset(sqe, 0, sizeof(*sqe));
sqe->addr = (uintptr_t) pe;
sqe->fd = epollfd;
sqe->len = op;
sqe->off = fd;
sqe->opcode = UV__IORING_OP_EPOLL_CTL;
sqe->user_data = op | slot << 2 | (int64_t) fd << 32;
if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask))
uv__epoll_ctl_flush(epollfd, ctl, events);
}
if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask))
uv__epoll_ctl_flush(epollfd, ctl, events);
}
@ -1405,8 +1386,22 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w->events = w->pevents;
e.events = w->pevents;
e.data.fd = w->fd;
fd = w->fd;
uv__epoll_ctl_prep(epollfd, ctl, &prep, op, w->fd, &e);
if (ctl->ringfd != -1) {
uv__epoll_ctl_prep(epollfd, ctl, &prep, op, fd, &e);
continue;
}
if (!epoll_ctl(epollfd, op, fd, &e))
continue;
assert(op == EPOLL_CTL_ADD);
assert(errno == EEXIST);
/* File descriptor that's been watched before, update event mask. */
if (epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &e))
abort();
}
inv.events = events;
@ -1494,8 +1489,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*
* Perform EPOLL_CTL_DEL immediately instead of going through
* io_uring's submit queue, otherwise the file descriptor may
* be closed by the time the kernel starts the operation.
*/
uv__epoll_ctl_prep(epollfd, ctl, &prep, EPOLL_CTL_DEL, fd, pe);
epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, pe);
continue;
}
@ -1630,36 +1629,17 @@ done:
int uv_resident_set_memory(size_t* rss) {
char buf[1024];
const char* s;
ssize_t n;
long val;
int fd;
int rc;
int i;
do
fd = open("/proc/self/stat", O_RDONLY);
while (fd == -1 && errno == EINTR);
if (fd == -1)
return UV__ERR(errno);
do
n = read(fd, buf, sizeof(buf) - 1);
while (n == -1 && errno == EINTR);
uv__close(fd);
if (n == -1)
return UV__ERR(errno);
buf[n] = '\0';
s = strchr(buf, ' ');
if (s == NULL)
goto err;
s += 1;
if (*s != '(')
goto err;
s = strchr(s, ')');
/* rss: 24th element */
rc = uv__slurp("/proc/self/stat", buf, sizeof(buf));
if (rc < 0)
return rc;
/* find the last ')' */
s = strrchr(buf, ')');
if (s == NULL)
goto err;
@ -1671,9 +1651,7 @@ int uv_resident_set_memory(size_t* rss) {
errno = 0;
val = strtol(s, NULL, 10);
if (errno != 0)
goto err;
if (val < 0)
if (val < 0 || errno != 0)
goto err;
*rss = val * getpagesize();
@ -2278,6 +2256,136 @@ uint64_t uv_get_available_memory(void) {
}
static int uv__get_cgroupv2_constrained_cpu(const char* cgroup,
uv__cpu_constraint* constraint) {
char path[256];
char buf[1024];
unsigned int weight;
int cgroup_size;
const char* cgroup_trimmed;
char quota_buf[16];
if (strncmp(cgroup, "0::/", 4) != 0)
return UV_EINVAL;
/* Measure the cgroup path, stopping at the trailing newline */
cgroup_trimmed = cgroup + sizeof("0::/") - 1; /* Skip the prefix "0::/" */
cgroup_size = (int)strcspn(cgroup_trimmed, "\n"); /* Length up to the first newline */
/* Construct the path to the cpu.max file */
snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.max", cgroup_size,
cgroup_trimmed);
/* Read cpu.max */
if (uv__slurp(path, buf, sizeof(buf)) < 0)
return UV_EIO;
if (sscanf(buf, "%15s %llu", quota_buf, &constraint->period_length) != 2)
return UV_EINVAL;
if (strncmp(quota_buf, "max", 3) == 0)
constraint->quota_per_period = LLONG_MAX;
else if (sscanf(quota_buf, "%lld", &constraint->quota_per_period) != 1)
return UV_EINVAL; /* conversion failed */
/* Construct the path to the cpu.weight file */
snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.weight", cgroup_size,
cgroup_trimmed);
/* Read cpu.weight */
if (uv__slurp(path, buf, sizeof(buf)) < 0)
return UV_EIO;
if (sscanf(buf, "%u", &weight) != 1)
return UV_EINVAL;
constraint->proportions = (double)weight / 100.0;
return 0;
}
static char* uv__cgroup1_find_cpu_controller(const char* cgroup,
int* cgroup_size) {
/* Seek to the cpu controller line. */
char* cgroup_cpu = strstr(cgroup, ":cpu,");
if (cgroup_cpu != NULL) {
/* Skip the controller prefix to the start of the cgroup path. */
cgroup_cpu += sizeof(":cpu,") - 1;
/* Determine the length of the cgroup path, excluding the newline. */
*cgroup_size = (int)strcspn(cgroup_cpu, "\n");
}
return cgroup_cpu;
}
static int uv__get_cgroupv1_constrained_cpu(const char* cgroup,
uv__cpu_constraint* constraint) {
char path[256];
char buf[1024];
unsigned int shares;
int cgroup_size;
char* cgroup_cpu;
cgroup_cpu = uv__cgroup1_find_cpu_controller(cgroup, &cgroup_size);
if (cgroup_cpu == NULL)
return UV_EIO;
/* Construct the path to the cpu.cfs_quota_us file */
snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.cfs_quota_us",
cgroup_size, cgroup_cpu);
if (uv__slurp(path, buf, sizeof(buf)) < 0)
return UV_EIO;
if (sscanf(buf, "%lld", &constraint->quota_per_period) != 1)
return UV_EINVAL;
/* Construct the path to the cpu.cfs_period_us file */
snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.cfs_period_us",
cgroup_size, cgroup_cpu);
/* Read cpu.cfs_period_us */
if (uv__slurp(path, buf, sizeof(buf)) < 0)
return UV_EIO;
if (sscanf(buf, "%lld", &constraint->period_length) != 1)
return UV_EINVAL;
/* Construct the path to the cpu.shares file */
snprintf(path, sizeof(path), "/sys/fs/cgroup/%.*s/cpu.shares", cgroup_size,
cgroup_cpu);
/* Read cpu.shares */
if (uv__slurp(path, buf, sizeof(buf)) < 0)
return UV_EIO;
if (sscanf(buf, "%u", &shares) != 1)
return UV_EINVAL;
constraint->proportions = (double)shares / 1024.0;
return 0;
}
int uv__get_constrained_cpu(uv__cpu_constraint* constraint) {
char cgroup[1024];
/* Read the cgroup from /proc/self/cgroup */
if (uv__slurp("/proc/self/cgroup", cgroup, sizeof(cgroup)) < 0)
return UV_EIO;
/* Check if the system is using cgroup v2 by examining /proc/self/cgroup
* The entry for cgroup v2 is always in the format "0::$PATH"
* see https://docs.kernel.org/admin-guide/cgroup-v2.html */
if (strncmp(cgroup, "0::/", 4) == 0)
return uv__get_cgroupv2_constrained_cpu(cgroup, constraint);
else
return uv__get_cgroupv1_constrained_cpu(cgroup, constraint);
}
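An internal-usage sketch mirroring the call in uv_available_parallelism(), with hypothetical values for a container limited to half a CPU:

  uv__cpu_constraint c = {0, 0, 0.0};
  if (uv__get_constrained_cpu(&c) == 0 && c.period_length > 0) {
    /* e.g. quota_per_period 50000, period_length 100000, proportions 1.0 */
    double cpus = (double) c.quota_per_period / c.period_length * c.proportions;
    /* cpus == 0.5; the caller clamps this against the online CPU count */
  }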
void uv_loadavg(double avg[3]) {
struct sysinfo info;
char buf[128]; /* Large enough to hold all of /proc/loadavg. */

View File

@ -979,11 +979,13 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
struct cmsghdr* cmsg;
char* p;
char* pe;
int fd;
int err;
size_t i;
size_t count;
err = 0;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (cmsg->cmsg_type != SCM_RIGHTS) {
fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
@ -996,24 +998,26 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
assert(count % sizeof(fd) == 0);
count /= sizeof(fd);
for (i = 0; i < count; i++) {
memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd));
/* Already has accepted fd, queue now */
if (stream->accepted_fd != -1) {
err = uv__stream_queue_fd(stream, fd);
if (err != 0) {
/* Close rest */
for (; i < count; i++)
uv__close(fd);
return err;
}
} else {
stream->accepted_fd = fd;
p = (void*) CMSG_DATA(cmsg);
pe = p + count * sizeof(fd);
while (p < pe) {
memcpy(&fd, p, sizeof(fd));
p += sizeof(fd);
if (err == 0) {
if (stream->accepted_fd == -1)
stream->accepted_fd = fd;
else
err = uv__stream_queue_fd(stream, fd);
}
if (err != 0)
uv__close(fd);
}
}
return 0;
return err;
}
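For context, a minimal sketch of the sending side this parser pairs with (not part of the patch; sockfd and fd are hypothetical): one SCM_RIGHTS control message may carry several descriptors, which is why the loop above walks CMSG_DATA in sizeof(fd) steps:

  char byte = 0;
  char cbuf[CMSG_SPACE(sizeof(int))];
  struct iovec iov = { &byte, 1 };  /* at least one byte of ordinary data */
  struct msghdr msg = {0};
  struct cmsghdr* cmsg;
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cbuf;
  msg.msg_controllen = sizeof(cbuf);
  cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd)); /* fd: the descriptor to pass */
  sendmsg(sockfd, &msg, 0);                 /* sockfd: an AF_UNIX socket */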

View File

@ -467,8 +467,8 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
if (!on)
return 0;
if (delay == 0)
return -1;
if (delay < 1)
return UV_EINVAL;
#ifdef __sun
/* The implementation of TCP keep-alive on Solaris/SmartOS is a bit unusual
@ -507,11 +507,11 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
return UV__ERR(errno);
intvl = idle/3;
intvl = 10; /* at least 10 seconds required */
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
return UV__ERR(errno);
cnt = 3;
cnt = 1; /* 1 retry, ensure (TCP_KEEPINTVL * TCP_KEEPCNT) is 10 seconds */
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
return UV__ERR(errno);
#else
@ -524,9 +524,7 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
/* Note that the consequent probes will not be sent at equal intervals on Solaris,
* but will be sent using the exponential backoff algorithm. */
intvl = idle/3;
cnt = 3;
int time_to_abort = intvl * cnt;
int time_to_abort = 10*1000; /* 10 seconds, kernel expects milliseconds */
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD, &time_to_abort, sizeof(time_to_abort)))
return UV__ERR(errno);
#endif
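Worked numbers for the Solaris/SmartOS branch above, assuming a hypothetical uv_tcp_keepalive(&handle, 1, 60): the first probe fires after the 60s idle time, then a single retry at a 10s interval, so a dead peer is detected roughly 70s after the last activity; the 10*1000 abort threshold on older releases is that same 10s budget expressed in milliseconds:

  int detect_after = 60 + 10 * 1; /* TCP_KEEPIDLE + TCP_KEEPINTVL * TCP_KEEPCNT */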
@ -543,7 +541,7 @@ int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
#endif
#ifdef TCP_KEEPINTVL
intvl = 1; /* 1 second; same as default on Win32 */
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
return UV__ERR(errno);
#endif

View File

@ -141,7 +141,7 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
if (revents & POLLIN)
uv__udp_recvmsg(handle);
if (revents & POLLOUT) {
if (revents & POLLOUT && !uv__is_closing(handle)) {
uv__udp_sendmsg(handle);
uv__udp_run_completed(handle);
}
@ -275,8 +275,61 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
&& handle->recv_cb != NULL);
}
static void uv__udp_sendmsg(uv_udp_t* handle) {
static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) {
struct uv__queue* q;
struct msghdr h;
ssize_t size;
for (;;) {
memset(&h, 0, sizeof h);
if (req->addr.ss_family == AF_UNSPEC) {
h.msg_name = NULL;
h.msg_namelen = 0;
} else {
h.msg_name = &req->addr;
if (req->addr.ss_family == AF_INET6)
h.msg_namelen = sizeof(struct sockaddr_in6);
else if (req->addr.ss_family == AF_INET)
h.msg_namelen = sizeof(struct sockaddr_in);
else if (req->addr.ss_family == AF_UNIX)
h.msg_namelen = sizeof(struct sockaddr_un);
else {
assert(0 && "unsupported address family");
abort();
}
}
h.msg_iov = (struct iovec*) req->bufs;
h.msg_iovlen = req->nbufs;
do
size = sendmsg(handle->io_watcher.fd, &h, 0);
while (size == -1 && errno == EINTR);
if (size == -1)
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return;
req->status = (size == -1 ? UV__ERR(errno) : size);
/* Sending a datagram is an atomic operation: either all data
* is written or nothing is (and EMSGSIZE is raised). That is
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
if (uv__queue_empty(&handle->write_queue))
return;
q = uv__queue_head(&handle->write_queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
}
}
#if defined(__linux__) || defined(__FreeBSD__)
static void uv__udp_sendmsg_many(uv_udp_t* handle) {
uv_udp_send_t* req;
struct mmsghdr h[20];
struct mmsghdr* p;
@ -285,16 +338,11 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
size_t pkts;
size_t i;
if (uv__queue_empty(&handle->write_queue))
return;
write_queue_drain:
for (pkts = 0, q = uv__queue_head(&handle->write_queue);
pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = uv__queue_head(q)) {
assert(q != NULL);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
p = &h[pkts];
memset(p, 0, sizeof(*p));
@ -328,10 +376,7 @@ write_queue_drain:
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < pkts && q != &handle->write_queue;
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = UV__ERR(errno);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
@ -346,10 +391,7 @@ write_queue_drain:
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < (size_t)npkts && q != &handle->write_queue;
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = req->bufs[0].len;
/* Sending a datagram is an atomic operation: either all data
@ -364,61 +406,31 @@ write_queue_drain:
/* couldn't batch everything, continue sending (jump to avoid stack growth) */
if (!uv__queue_empty(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
#else /* __linux__ || __FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
struct uv__queue* q;
ssize_t size;
while (!uv__queue_empty(&handle->write_queue)) {
q = uv__queue_head(&handle->write_queue);
assert(q != NULL);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
memset(&h, 0, sizeof h);
if (req->addr.ss_family == AF_UNSPEC) {
h.msg_name = NULL;
h.msg_namelen = 0;
} else {
h.msg_name = &req->addr;
if (req->addr.ss_family == AF_INET6)
h.msg_namelen = sizeof(struct sockaddr_in6);
else if (req->addr.ss_family == AF_INET)
h.msg_namelen = sizeof(struct sockaddr_in);
else if (req->addr.ss_family == AF_UNIX)
h.msg_namelen = sizeof(struct sockaddr_un);
else {
assert(0 && "unsupported address family");
abort();
}
}
h.msg_iov = (struct iovec*) req->bufs;
h.msg_iovlen = req->nbufs;
do {
size = sendmsg(handle->io_watcher.fd, &h, 0);
} while (size == -1 && errno == EINTR);
if (size == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
break;
}
req->status = (size == -1 ? UV__ERR(errno) : size);
/* Sending a datagram is an atomic operation: either all data
* is written or nothing is (and EMSGSIZE is raised). That is
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
}
#endif /* __linux__ || __FreeBSD__ */
static void uv__udp_sendmsg(uv_udp_t* handle) {
struct uv__queue* q;
uv_udp_send_t* req;
if (uv__queue_empty(&handle->write_queue))
return;
q = uv__queue_head(&handle->write_queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
#if defined(__linux__) || defined(__FreeBSD__)
/* Use sendmmsg() if this send request contains more than one datagram OR
* there is more than one send request (because that automatically implies
* there is more than one datagram.)
*/
if (req->nbufs != 1 || &handle->write_queue != uv__queue_next(&req->queue))
return uv__udp_sendmsg_many(handle);
#endif
return uv__udp_sendmsg_one(handle, req);
}
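A hedged sketch of what drives the branch above (handle, addr and the on_send callback are assumed to be set up elsewhere): queuing more than one send request, or one request with several buffers, routes through the sendmmsg() batch path on Linux/FreeBSD:

  uv_udp_send_t req_a, req_b;
  uv_buf_t buf = uv_buf_init("hi", 2);
  uv_udp_send(&req_a, &handle, &buf, 1, (const struct sockaddr*) &addr, on_send);
  uv_udp_send(&req_b, &handle, &buf, 1, (const struct sockaddr*) &addr, on_send);
  /* two queued requests -> uv__udp_sendmsg_many() batches both datagrams */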
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional

View File

@ -26,7 +26,6 @@
#include <signal.h>
#include <limits.h>
#include <wchar.h>
#include <malloc.h> /* _alloca */
#include "uv.h"
#include "internal.h"
@ -598,11 +597,9 @@ error:
}
int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
static int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
wchar_t* a_eq;
wchar_t* b_eq;
wchar_t* A;
wchar_t* B;
int nb;
int r;
@ -617,27 +614,8 @@ int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
assert(b_eq);
nb = b_eq - b;
A = _alloca((na+1) * sizeof(wchar_t));
B = _alloca((nb+1) * sizeof(wchar_t));
r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na);
assert(r==na);
A[na] = L'\0';
r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb);
assert(r==nb);
B[nb] = L'\0';
for (;;) {
wchar_t AA = *A++;
wchar_t BB = *B++;
if (AA < BB) {
return -1;
} else if (AA > BB) {
return 1;
} else if (!AA && !BB) {
return 0;
}
}
r = CompareStringOrdinal(a, na, b, nb, /*case insensitive*/TRUE);
return r - CSTR_EQUAL;
}
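The replacement relies on the return convention of CompareStringOrdinal(), which yields CSTR_LESS_THAN (1), CSTR_EQUAL (2) or CSTR_GREATER_THAN (3); subtracting CSTR_EQUAL maps that onto the usual negative/zero/positive comparator contract. A minimal sketch:

  int r = CompareStringOrdinal(L"PATH", 4, L"path", 4, /*case insensitive*/TRUE);
  /* r == CSTR_EQUAL, so r - CSTR_EQUAL == 0: the strings compare equal */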
@ -676,6 +654,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) {
WCHAR* dst_copy;
WCHAR** ptr_copy;
WCHAR** env_copy;
char* p;
size_t required_vars_value_len[ARRAY_SIZE(required_vars)];
/* first pass: determine size in UTF-16 */
@ -691,11 +670,13 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) {
}
/* second pass: copy to UTF-16 environment block */
dst_copy = uv__malloc(env_len * sizeof(WCHAR));
if (dst_copy == NULL && env_len > 0) {
len = env_block_count * sizeof(WCHAR*);
p = uv__malloc(len + env_len * sizeof(WCHAR));
if (p == NULL) {
return UV_ENOMEM;
}
env_copy = _alloca(env_block_count * sizeof(WCHAR*));
env_copy = (void*) &p[0];
dst_copy = (void*) &p[len];
ptr = dst_copy;
ptr_copy = env_copy;
@ -745,7 +726,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) {
/* final pass: copy, in sort order, and inserting required variables */
dst = uv__malloc((1+env_len) * sizeof(WCHAR));
if (!dst) {
uv__free(dst_copy);
uv__free(p);
return UV_ENOMEM;
}
@ -790,7 +771,7 @@ int make_program_env(char* env_block[], WCHAR** dst_ptr) {
assert(env_len == (size_t) (ptr - dst));
*ptr = L'\0';
uv__free(dst_copy);
uv__free(p);
*dst_ptr = dst;
return 0;
}
@ -1308,16 +1289,34 @@ static int uv__kill(HANDLE process_handle, int signum) {
/* Unconditionally terminate the process. On Windows, killed processes
* normally return 1. */
int err;
DWORD status;
if (TerminateProcess(process_handle, 1))
return 0;
/* If the process already exited before TerminateProcess was called,.
/* If the process already exited before TerminateProcess was called,
* TerminateProcess will fail with ERROR_ACCESS_DENIED. */
err = GetLastError();
if (err == ERROR_ACCESS_DENIED &&
WaitForSingleObject(process_handle, 0) == WAIT_OBJECT_0) {
return UV_ESRCH;
if (err == ERROR_ACCESS_DENIED) {
/* First check with GetExitCodeProcess() that the status differs from
* STILL_ACTIVE (259). A process can set its exit code to that value,
* though that is uncommon. */
if (GetExitCodeProcess(process_handle, &status) &&
status != STILL_ACTIVE) {
return UV_ESRCH;
}
/* But the process could have exited with code == STILL_ACTIVE, use then
* WaitForSingleObject with timeout zero. This is prone to a race
* condition as it could return WAIT_TIMEOUT because the handle might
* not have been signaled yet. That would result in returning the wrong
* error code here (UV_EACCES instead of UV_ESRCH), but we cannot fix
* the kernel synchronization issue that TerminateProcess is
* inconsistent with WaitForSingleObject with just the APIs available to
* us in user space. */
if (WaitForSingleObject(process_handle, 0) == WAIT_OBJECT_0) {
return UV_ESRCH;
}
}
return uv_translate_sys_error(err);
@ -1325,6 +1324,14 @@ static int uv__kill(HANDLE process_handle, int signum) {
case 0: {
/* Health check: is the process still alive? */
DWORD status;
if (!GetExitCodeProcess(process_handle, &status))
return uv_translate_sys_error(GetLastError());
if (status != STILL_ACTIVE)
return UV_ESRCH;
switch (WaitForSingleObject(process_handle, 0)) {
case WAIT_OBJECT_0:
return UV_ESRCH;
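The signum == 0 health-check path above is what liveness polling through the public API relies on; a hedged sketch, assuming a previously spawned uv_process_t named process:

  int alive = (uv_kill(process.pid, 0) == 0); /* UV_ESRCH once it has exited */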

View File

@ -58,11 +58,17 @@ static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsign
return WSAGetLastError();
}
if (enable && setsockopt(socket,
IPPROTO_TCP,
TCP_KEEPALIVE,
(const char*)&delay,
sizeof delay) == -1) {
if (!enable)
return 0;
if (delay < 1)
return UV_EINVAL;
if (setsockopt(socket,
IPPROTO_TCP,
TCP_KEEPALIVE,
(const char*)&delay,
sizeof delay) == -1) {
return WSAGetLastError();
}

View File

@ -32,45 +32,23 @@
#include "uv.h"
#include "internal.h"
static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
DWORD result;
HANDLE existing_event, created_event;
typedef void (*uv__once_cb)(void);
created_event = CreateEvent(NULL, 1, 0, NULL);
if (created_event == 0) {
/* Could fail in a low-memory situation? */
uv_fatal_error(GetLastError(), "CreateEvent");
}
typedef struct {
uv__once_cb callback;
} uv__once_data_t;
existing_event = InterlockedCompareExchangePointer(&guard->event,
created_event,
NULL);
static BOOL WINAPI uv__once_inner(INIT_ONCE *once, void* param, void** context) {
uv__once_data_t* data = param;
if (existing_event == NULL) {
/* We won the race */
callback();
data->callback();
result = SetEvent(created_event);
assert(result);
guard->ran = 1;
} else {
/* We lost the race. Destroy the event we created and wait for the existing
* one to become signaled. */
CloseHandle(created_event);
result = WaitForSingleObject(existing_event, INFINITE);
assert(result == WAIT_OBJECT_0);
}
return TRUE;
}
void uv_once(uv_once_t* guard, void (*callback)(void)) {
/* Fast case - avoid WaitForSingleObject. */
if (guard->ran) {
return;
}
uv__once_inner(guard, callback);
void uv_once(uv_once_t* guard, uv__once_cb callback) {
uv__once_data_t data = { .callback = callback };
InitOnceExecuteOnce(&guard->init_once, uv__once_inner, (void*) &data, NULL);
}
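Caller-visible behavior is unchanged by the InitOnceExecuteOnce rewrite; a minimal usage sketch:

  static uv_once_t guard = UV_ONCE_INIT;
  static void init_cb(void) { /* runs exactly once, even when racing */ }

  uv_once(&guard, init_cb); /* contending threads block until init_cb returns */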

View File

@ -1259,6 +1259,9 @@ int uv_os_getenv(const char* name, char* buffer, size_t* size) {
SetLastError(ERROR_SUCCESS);
len = GetEnvironmentVariableW(name_w, var, varlen);
if (len == 0)
r = uv_translate_sys_error(GetLastError());
if (len < varlen)
break;
@ -1280,15 +1283,8 @@ int uv_os_getenv(const char* name, char* buffer, size_t* size) {
uv__free(name_w);
name_w = NULL;
if (len == 0) {
r = GetLastError();
if (r != ERROR_SUCCESS) {
r = uv_translate_sys_error(r);
goto fail;
}
}
r = uv__copy_utf16_to_utf8(var, len, buffer, size);
if (r == 0)
r = uv__copy_utf16_to_utf8(var, len, buffer, size);
fail:
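A hedged caller sketch of the contract the cleanup above preserves: on UV_ENOBUFS the size is updated to the required length (including the terminating NUL), and an unset variable yields UV_ENOENT:

  char buf[64];
  size_t len = sizeof(buf);
  int r = uv_os_getenv("PATH", buf, &len);
  if (r == UV_ENOBUFS)
    /* len now holds the required buffer size */;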

View File

@ -33,6 +33,11 @@ TEST_IMPL(env_vars) {
int i, r, envcount, found, found_win_special;
uv_env_item_t* envitems;
#if defined(_WIN32) && defined(__ASAN__)
/* See investigation in https://github.com/libuv/libuv/issues/4338 */
RETURN_SKIP("Test does not currently work on Windows under ASAN");
#endif
/* Reject invalid inputs when setting an environment variable */
r = uv_os_setenv(NULL, "foo");
ASSERT_EQ(r, UV_EINVAL);

View File

@ -197,12 +197,13 @@ static void fs_event_cb_dir_multi_file(uv_fs_event_t* handle,
ASSERT_PTR_EQ(handle, &fs_event);
ASSERT_OK(status);
ASSERT(events == UV_CHANGE || events == UV_RENAME);
#if defined(__APPLE__) || defined(_WIN32) || defined(__linux__)
ASSERT_OK(strncmp(filename, file_prefix, sizeof(file_prefix) - 1));
#else
ASSERT_NE(filename == NULL ||
strncmp(filename, file_prefix, sizeof(file_prefix) - 1) == 0, 0);
#endif
#if defined(__APPLE__) || defined(_WIN32) || defined(__linux__)
ASSERT_NOT_NULL(filename);
ASSERT_MEM_EQ(filename, file_prefix, sizeof(file_prefix) - 1);
#else
if (filename != NULL)
ASSERT_MEM_EQ(filename, file_prefix, sizeof(file_prefix) - 1);
#endif
if (fs_event_created + fs_event_removed == fs_event_file_count) {
/* Once we've processed all create events, delete all files */

test/test-iouring-pollhup.c (new file, 111 lines)
View File

@ -0,0 +1,111 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#ifdef _WIN32
TEST_IMPL(iouring_pollhup) {
RETURN_SKIP("Not on Windows.");
}
#else /* !_WIN32 */
#include <unistd.h> /* close() */
static uv_pipe_t p1;
static uv_pipe_t p2;
static uv_idle_t idle_handle;
static int iters;
static int duped_fd;
static int newpipefds[2];
static void alloc_buffer(uv_handle_t* handle,
size_t suggested_size,
uv_buf_t* buf) {
static char slab[32];
*buf = uv_buf_init(slab, sizeof(slab));
}
static void read_data2(uv_stream_t* stream,
ssize_t nread,
const uv_buf_t* buf) {
if (nread < 0) {
ASSERT_EQ(nread, UV_EOF);
ASSERT_OK(close(duped_fd));
duped_fd = -1;
uv_close((uv_handle_t*) &p2, NULL);
uv_close((uv_handle_t*) &idle_handle, NULL);
} else {
/* nread == 0 here would mean a POLLHUP was still delivered from the
* pipefds[0] file description, which duped_fd keeps alive. That must not
* happen when the close of p1's fd is performed after EPOLL_CTL_DEL.
*/
ASSERT_GT(nread, 0);
}
}
static void idle_cb(uv_idle_t* handle) {
if (++iters == 1) {
ASSERT_OK(uv_pipe_open(&p2, newpipefds[0]));
ASSERT_OK(uv_read_start((uv_stream_t*) &p2, alloc_buffer, read_data2));
} else {
ASSERT_OK(uv_idle_stop(handle));
ASSERT_OK(close(newpipefds[1]));
newpipefds[1] = -1;
}
}
static void read_data(uv_stream_t* stream,
ssize_t nread,
const uv_buf_t* buf) {
ASSERT_EQ(nread, UV_EOF);
uv_close((uv_handle_t*) stream, NULL);
ASSERT_OK(uv_idle_start(&idle_handle, idle_cb));
}
TEST_IMPL(iouring_pollhup) {
uv_loop_t* loop;
int pipefds[2];
loop = uv_default_loop();
ASSERT_OK(uv_pipe_init(loop, &p1, 0));
ASSERT_OK(uv_pipe_init(loop, &p2, 0));
ASSERT_OK(uv_idle_init(loop, &idle_handle));
ASSERT_OK(pipe(pipefds));
ASSERT_OK(pipe(newpipefds));
ASSERT_OK(uv_pipe_open(&p1, pipefds[0]));
duped_fd = dup(pipefds[0]);
ASSERT_NE(duped_fd, -1);
ASSERT_OK(uv_read_start((uv_stream_t*) &p1, alloc_buffer, read_data));
ASSERT_OK(close(pipefds[1])); /* Close write end, generate POLLHUP. */
pipefds[1] = -1;
ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT));
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
#endif /* !_WIN32 */

View File

@ -563,6 +563,8 @@ TEST_DECLARE (fork_threadpool_queue_work_simple)
#endif
#endif
TEST_DECLARE (iouring_pollhup)
TEST_DECLARE (idna_toascii)
TEST_DECLARE (utf8_decode1)
TEST_DECLARE (utf8_decode1_overrun)
@ -1204,6 +1206,8 @@ TASK_LIST_START
#endif
#endif
TEST_ENTRY (iouring_pollhup)
TEST_ENTRY (utf8_decode1)
TEST_ENTRY (utf8_decode1_overrun)
TEST_ENTRY (uname)

View File

@ -90,6 +90,45 @@ TEST_IMPL(platform_output) {
ASSERT_GE(par, 1);
printf("uv_available_parallelism: %u\n", par);
#ifdef __linux__
FILE* file;
int cgroup_version = 0;
unsigned int cgroup_par = 0;
unsigned long long quota, period;
// Attempt to parse cgroup v2 to deduce parallelism constraints
file = fopen("/sys/fs/cgroup/cpu.max", "r");
if (file) {
if (fscanf(file, "%llu %llu", &quota, &period) == 2 && quota > 0) {
cgroup_version = 2;
cgroup_par = (unsigned int)(quota / period);
}
fclose(file);
}
// If cgroup v2 wasn't present, try parsing cgroup v1
if (cgroup_version == 0) {
file = fopen("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us", "r");
if (file) {
if (fscanf(file, "%llu", &quota) == 1 && quota > 0 && quota < ~0ULL) {
fclose(file);
file = fopen("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us", "r");
if (file && fscanf(file, "%llu", &period) == 1) {
cgroup_version = 1;
cgroup_par = (unsigned int)(quota / period);
}
}
if (file) fclose(file);
}
}
// If we found cgroup parallelism constraints, assert and print them
if (cgroup_par > 0) {
ASSERT_GE(par, cgroup_par);
printf("cgroup v%d available parallelism: %u\n", cgroup_version, cgroup_par);
}
#endif
err = uv_cpu_info(&cpus, &count);
#if defined(__CYGWIN__) || defined(__MSYS__)
ASSERT_EQ(err, UV_ENOSYS);

View File

@ -33,7 +33,8 @@ TEST_IMPL(tcp_flags) {
loop = uv_default_loop();
r = uv_tcp_init(loop, &handle);
/* Use _ex to make sure the socket is created. */
r = uv_tcp_init_ex(loop, &handle, AF_INET);
ASSERT_OK(r);
r = uv_tcp_nodelay(&handle, 1);
@ -42,6 +43,12 @@ TEST_IMPL(tcp_flags) {
r = uv_tcp_keepalive(&handle, 1, 60);
ASSERT_OK(r);
r = uv_tcp_keepalive(&handle, 0, 0);
ASSERT_OK(r);
r = uv_tcp_keepalive(&handle, 1, 0);
ASSERT_EQ(r, UV_EINVAL);
uv_close((uv_handle_t*)&handle, NULL);
r = uv_run(loop, UV_RUN_DEFAULT);