unix: add flag for blocking SIGPROF during poll
Add a per-event-loop flag for blocking SIGPROF signals while polling for
events. The motivation for this addition is to reduce the number of wakeups,
and the resulting clock_gettime() system calls, when a sampling profiler is
active. On Linux, enabling the flag switches the event loop from epoll_wait()
to epoll_pwait(); other platforms bracket the poll syscall with
pthread_sigmask() calls.

Refs strongloop/strong-agent#3 and strongloop-internal/scrum-cs#37.

PR-URL: https://github.com/libuv/libuv/pull/15
Reviewed-By: Saúl Ibarra Corretgé <saghul@gmail.com>
commit 2daf9448b1
parent 0bcac64512
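The non-Linux backends below implement the flag as a plain mask/poll/unmask
bracket. As a standalone illustration of that pattern, here is a minimal
sketch (not libuv code; poll() and the helper name are placeholders) of
blocking SIGPROF for the calling thread around a blocking poll syscall:

#include <poll.h>
#include <pthread.h>
#include <signal.h>

/* Block SIGPROF for this thread only while it sits in poll(), so a sampling
 * profiler's timer signals are deferred instead of waking an idle loop. */
static int poll_without_sigprof(struct pollfd* fds, nfds_t nfds, int timeout) {
  sigset_t set;
  int r;

  sigemptyset(&set);
  sigaddset(&set, SIGPROF);

  pthread_sigmask(SIG_BLOCK, &set, NULL);    /* SIGPROF now stays pending */
  r = poll(fds, nfds, timeout);
  pthread_sigmask(SIG_UNBLOCK, &set, NULL);  /* pending SIGPROF delivered here */

  return r;
}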
@@ -111,6 +111,11 @@ enum {
   UV_TCP_SINGLE_ACCEPT = 0x400 /* Only accept() when idle. */
 };
 
+/* loop flags */
+enum {
+  UV_LOOP_BLOCK_SIGPROF = 1
+};
+
 /* core */
 int uv__nonblock(int fd, int set);
 int uv__cloexec(int fd, int set);
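UV_LOOP_BLOCK_SIGPROF is a private flag with no public setter in this change,
so an embedder such as a profiler agent would presumably set the bit on the
loop directly before running it. A hypothetical sketch, assuming access to
libuv's private headers:

/* Hypothetical embedder code: loop->flags is internal to libuv. */
uv_loop_t* loop = uv_default_loop();
loop->flags |= UV_LOOP_BLOCK_SIGPROF;  /* block SIGPROF while the loop polls */
/* ... install the SIGPROF-based sampler and run the loop as usual ... */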
@@ -56,9 +56,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   unsigned int nevents;
   unsigned int revents;
   ngx_queue_t* q;
+  uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
-  uv__io_t* w;
   int filter;
   int fflags;
   int count;
@@ -118,6 +120,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
@@ -128,6 +137,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
       spec.tv_nsec = (timeout % 1000) * 1000000;
     }
 
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
     nfds = kevent(loop->backend_fd,
                   events,
                   nevents,
@@ -135,6 +147,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
                   ARRAY_SIZE(events),
                   timeout == -1 ? NULL : &spec);
 
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
     /* Update loop->time unconditionally. It's tempting to skip the update when
      * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
      * operating system didn't reschedule our process while in the syscall.
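Note that the kqueue backend above (and the event-ports backend further down)
toggles the mask with pthread_sigmask() rather than sigprocmask(): only the
thread running the loop should stop taking profiler samples, and
sigprocmask() has unspecified behaviour in multithreaded processes. The
bracket costs two extra syscalls per loop iteration; on Linux, epoll_pwait()
folds the mask change into the wait itself.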
@@ -33,6 +33,7 @@
 #include <sys/prctl.h>
 #include <sys/sysinfo.h>
 #include <unistd.h>
+#include <signal.h>
 #include <fcntl.h>
 #include <time.h>
 
@@ -130,6 +131,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct uv__epoll_event e;
   ngx_queue_t* q;
   uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
   int nevents;
@@ -180,12 +183,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
 
   for (;;) {
-    if (!no_epoll_wait) {
+    if (no_epoll_wait || pset != NULL) {
+      nfds = uv__epoll_pwait(loop->backend_fd,
+                             events,
+                             ARRAY_SIZE(events),
+                             timeout,
+                             pset);
+    } else {
       nfds = uv__epoll_wait(loop->backend_fd,
                             events,
                             ARRAY_SIZE(events),
@@ -194,12 +210,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
         no_epoll_wait = 1;
         continue;
       }
-    } else {
-      nfds = uv__epoll_pwait(loop->backend_fd,
-                             events,
-                             ARRAY_SIZE(events),
-                             timeout,
-                             NULL);
     }
 
     /* Update loop->time unconditionally. It's tempting to skip the update when
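epoll_pwait() behaves like epoll_wait() but installs the supplied signal mask
for the duration of the call and restores the previous mask on return, so the
Linux backend needs no separate pthread_sigmask() pair. A rough userspace
rendering of that semantics (illustrative only; the real epoll_pwait() swaps
the mask atomically inside the kernel):

#include <sys/epoll.h>
#include <pthread.h>
#include <signal.h>

/* Approximately what epoll_pwait(epfd, evs, n, timeout, mask) does, spelled
 * out as epoll_wait() plus explicit mask handling around the call. */
static int epoll_pwait_emulated(int epfd, struct epoll_event* evs, int n,
                                int timeout, const sigset_t* mask) {
  sigset_t saved;
  int nfds;

  pthread_sigmask(SIG_SETMASK, mask, &saved);  /* install the poll-time mask */
  nfds = epoll_wait(epfd, evs, n, timeout);
  pthread_sigmask(SIG_SETMASK, &saved, NULL);  /* restore the previous mask */

  return nfds;
}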
@@ -112,6 +112,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct timespec spec;
   ngx_queue_t* q;
   uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
   uint64_t base;
   uint64_t diff;
   unsigned int nfds;
@@ -119,6 +121,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int saved_errno;
   int nevents;
   int count;
+  int err;
   int fd;
 
   if (loop->nfds == 0) {
@@ -140,6 +143,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w->events = w->pevents;
   }
 
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
   assert(timeout >= -1);
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
@@ -155,11 +165,20 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
 
     nfds = 1;
     saved_errno = 0;
-    if (port_getn(loop->backend_fd,
-                  events,
-                  ARRAY_SIZE(events),
-                  &nfds,
-                  timeout == -1 ? NULL : &spec)) {
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+    err = port_getn(loop->backend_fd,
+                    events,
+                    ARRAY_SIZE(events),
+                    &nfds,
+                    timeout == -1 ? NULL : &spec);
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+    if (err) {
       /* Work around another kernel bug: port_getn() may return events even
        * on error.
        */