unix: add flag for blocking SIGPROF during poll
Add a per-event loop flag for blocking SIGPROF signals when polling for
events.  The motivation for this addition is to reduce the number of
wakeups and subsequent clock_gettime() system calls when using a
sampling profiler.

On Linux, this switches from epoll_wait() to epoll_pwait() when enabled.
Other platforms bracket the poll syscall with pthread_sigmask() calls.

Refs strongloop/strong-agent#3 and strongloop-internal/scrum-cs#37.

PR-URL: https://github.com/libuv/libuv/pull/15
Reviewed-By: Saúl Ibarra Corretgé <saghul@gmail.com>
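To make the motivation concrete: a sampling profiler arms SIGPROF with an interval timer, and each delivered signal can interrupt a blocking poll and wake the event-loop thread, which then re-reads the clock before polling again. A minimal, illustrative sketch of such a profiler setup using standard POSIX calls (hypothetical example code, not taken from strong-agent or libuv):

/* Hypothetical sketch of how a sampling profiler arms SIGPROF: an
 * ITIMER_PROF timer delivers SIGPROF as the process consumes CPU time,
 * and the handler records a sample.  Not taken from strong-agent or libuv. */
#include <signal.h>
#include <string.h>
#include <sys/time.h>

static void on_sigprof(int signo) {
  (void) signo;
  /* A real profiler captures a stack sample here; only async-signal-safe
   * work is allowed in the handler. */
}

static void start_sampling(void) {
  struct sigaction sa;
  struct itimerval it;

  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = on_sigprof;
  sa.sa_flags = SA_RESTART;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGPROF, &sa, NULL);

  it.it_interval.tv_sec = 0;
  it.it_interval.tv_usec = 1000;  /* sample roughly every millisecond of CPU time */
  it.it_value = it.it_interval;
  setitimer(ITIMER_PROF, &it, NULL);
}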
parent 0bcac64512
commit 2daf9448b1
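On platforms without an epoll_pwait()-style interface, the change brackets the blocking syscall with pthread_sigmask() calls, as described above. A rough standalone sketch of that pattern, using plain poll(2) as a stand-in for kevent()/port_getn() (the helper name is illustrative, not libuv API):

/* Standalone sketch of the pthread_sigmask() bracketing described in the
 * commit message.  poll(2) stands in for kevent()/port_getn(); the helper
 * is illustrative and not part of libuv. */
#include <poll.h>
#include <pthread.h>
#include <signal.h>

static int poll_with_sigprof_blocked(struct pollfd* fds, nfds_t n, int timeout) {
  sigset_t set;
  int rc;

  sigemptyset(&set);
  sigaddset(&set, SIGPROF);

  /* Block SIGPROF so a profiler sample cannot interrupt the wait... */
  pthread_sigmask(SIG_BLOCK, &set, NULL);
  rc = poll(fds, n, timeout);
  /* ...then unblock it right away so any pending sample is delivered. */
  pthread_sigmask(SIG_UNBLOCK, &set, NULL);

  return rc;
}

This pattern costs two extra pthread_sigmask() calls around every wait, whereas epoll_pwait() on Linux lets the kernel install and restore the mask as part of the wait itself.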
@@ -111,6 +111,11 @@ enum {
   UV_TCP_SINGLE_ACCEPT = 0x400 /* Only accept() when idle. */
 };
 
+/* loop flags */
+enum {
+  UV_LOOP_BLOCK_SIGPROF = 1
+};
+
 /* core */
 int uv__nonblock(int fd, int set);
 int uv__cloexec(int fd, int set);
@@ -56,9 +56,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   unsigned int nevents;
   unsigned int revents;
   ngx_queue_t* q;
+  uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
-  uv__io_t* w;
   int filter;
   int fflags;
   int count;
@@ -118,6 +120,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
@@ -128,6 +137,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
       spec.tv_nsec = (timeout % 1000) * 1000000;
     }
 
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
     nfds = kevent(loop->backend_fd,
                   events,
                   nevents,
@@ -135,6 +147,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
                   ARRAY_SIZE(events),
                   timeout == -1 ? NULL : &spec);
 
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
     /* Update loop->time unconditionally. It's tempting to skip the update when
      * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
      * operating system didn't reschedule our process while in the syscall.
@@ -33,6 +33,7 @@
 #include <sys/prctl.h>
 #include <sys/sysinfo.h>
 #include <unistd.h>
+#include <signal.h>
 #include <fcntl.h>
 #include <time.h>
 
@@ -130,6 +131,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct uv__epoll_event e;
   ngx_queue_t* q;
   uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
   int nevents;
@@ -180,12 +183,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
 
   for (;;) {
-    if (!no_epoll_wait) {
+    if (no_epoll_wait || pset != NULL) {
+      nfds = uv__epoll_pwait(loop->backend_fd,
+                             events,
+                             ARRAY_SIZE(events),
+                             timeout,
+                             pset);
+    } else {
       nfds = uv__epoll_wait(loop->backend_fd,
                             events,
                             ARRAY_SIZE(events),
@@ -194,12 +210,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
         no_epoll_wait = 1;
         continue;
       }
-    } else {
-      nfds = uv__epoll_pwait(loop->backend_fd,
-                             events,
-                             ARRAY_SIZE(events),
-                             timeout,
-                             NULL);
     }
 
     /* Update loop->time unconditionally. It's tempting to skip the update when
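The hunks above make the Linux backend call uv__epoll_pwait() whenever a signal mask is wanted (and also when epoll_wait() is unavailable), passing pset so the kernel blocks SIGPROF only for the duration of the wait. For reference, a standalone sketch of the same idea with the glibc epoll_pwait() wrapper (libuv goes through its own syscall wrapper instead; the helper name below is illustrative):

/* Standalone sketch of epoll_pwait() with SIGPROF masked during the wait.
 * Illustrative only; the helper name is not libuv API. */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/epoll.h>

static int wait_with_sigprof_blocked(int epfd, struct epoll_event* events,
                                     int maxevents, int timeout) {
  sigset_t set;

  sigemptyset(&set);
  sigaddset(&set, SIGPROF);

  /* The kernel installs 'set' (with SIGPROF blocked) only while waiting
   * and restores the caller's mask before returning. */
  return epoll_pwait(epfd, events, maxevents, timeout, &set);
}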
@@ -112,6 +112,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct timespec spec;
   ngx_queue_t* q;
   uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
   unsigned int nfds;
@@ -119,6 +121,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int saved_errno;
   int nevents;
   int count;
+  int err;
   int fd;
 
   if (loop->nfds == 0) {
@@ -140,6 +143,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
@@ -155,11 +165,20 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
 
     nfds = 1;
     saved_errno = 0;
-    if (port_getn(loop->backend_fd,
-                  events,
-                  ARRAY_SIZE(events),
-                  &nfds,
-                  timeout == -1 ? NULL : &spec)) {
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+    err = port_getn(loop->backend_fd,
+                    events,
+                    ARRAY_SIZE(events),
+                    &nfds,
+                    timeout == -1 ? NULL : &spec);
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+    if (err) {
       /* Work around another kernel bug: port_getn() may return events even
        * on error.
        */