tcp_splice: Close sockets right away on high number of open files

We can't take for granted that the hard limit on open files is
large enough to let us defer closing sockets to a timer.

Store the RLIMIT_NOFILE value we set at start, and use it to
detect whether we're approaching the limit with pending, spliced
TCP connections. If that's the case, close sockets as soon as
they're no longer needed, instead of deferring this task to a
timer.

Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Stefano Brivio 2022-03-19 00:33:46 +01:00
parent be5bbb9b06
commit 92074c16a8
5 changed files with 27 additions and 7 deletions
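For a back-of-the-envelope illustration of the policy: the 30% factor and the count of six file descriptors per spliced connection are taken from the tcp_splice.c hunk below, but reading "six" as two sockets plus four pipe ends is an assumption of this sketch, not something the commit states.

#include <stdio.h>

#define FILE_PRESSURE	30	/* mirrors TCP_SPLICE_FILE_PRESSURE below */
#define FDS_PER_CONN	6	/* assumed: 2 sockets + 4 pipe ends */

int main(void)
{
	int nofile = 1048576;	/* example RLIMIT_NOFILE stored at start */
	int max_files = nofile / 100 * FILE_PRESSURE;
	int conns = 60000;	/* example count of spliced connections */

	/* Same check as tcp_splice_defer_handler() below: once the
	 * worst-case descriptor usage crosses 30% of the limit, stop
	 * waiting for the timer.
	 */
	if (conns * FDS_PER_CONN >= max_files)
		printf("file pressure: close sockets right away\n");
	else
		printf("no pressure: defer closing to the timer\n");
	return 0;
}

With these example numbers, 60000 connections may pin up to 360000 descriptors, above the 314550 threshold, so the early-close path would trigger.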

passt.c

@@ -371,7 +371,7 @@ int main(int argc, char **argv)
 		perror("getrlimit");
 		exit(EXIT_FAILURE);
 	}
-	limit.rlim_cur = limit.rlim_max;
+	c.nofile = limit.rlim_cur = limit.rlim_max;
 	if (setrlimit(RLIMIT_NOFILE, &limit)) {
 		perror("setrlimit");
 		exit(EXIT_FAILURE);
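The surrounding code raises the soft limit to the hard limit, and the changed line now also records it. A self-contained sketch of the same getrlimit()/setrlimit() pattern, with a local variable standing in for c.nofile:

#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit limit;
	int nofile;	/* stands in for c.nofile in the diff above */

	if (getrlimit(RLIMIT_NOFILE, &limit)) {
		perror("getrlimit");
		exit(EXIT_FAILURE);
	}

	/* Raise the soft limit to the hard limit, and remember the value;
	 * the narrowing assignment into an int mirrors the diff above.
	 */
	nofile = limit.rlim_cur = limit.rlim_max;
	if (setrlimit(RLIMIT_NOFILE, &limit)) {
		perror("setrlimit");
		exit(EXIT_FAILURE);
	}

	printf("open file limit: %i\n", nofile);
	return 0;
}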

passt.h

@@ -98,6 +98,7 @@ enum passt_modes {
  * @quiet:		Don't print informational messages
  * @foreground:		Run in foreground, don't log to stderr by default
  * @stderr:		Force logging to stderr
+ * @nofile:		Maximum number of open files (ulimit -n)
  * @sock_path:		Path for UNIX domain socket
  * @pcap:		Path for packet capture file
  * @pid_file:		Path to PID file, empty string if not configured
@@ -160,6 +161,7 @@ struct ctx {
 	int quiet;
 	int foreground;
 	int stderr;
+	int nofile;
 	char sock_path[UNIX_PATH_MAX];
 	char pcap[PATH_MAX];
 	char pid_file[PATH_MAX];

tcp.c

@@ -1560,6 +1560,7 @@ void tcp_defer_handler(struct ctx *c)
 {
 	tcp_l2_flags_buf_flush(c);
 	tcp_l2_data_buf_flush(c);
+	tcp_splice_defer_handler(c);
 }
 
 /**
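A hypothetical main-loop excerpt, not part of this commit, only to illustrate where a handler like tcp_defer_handler() fits: after a batch of epoll events has been processed, rather than on a timer. Both helper names here are stand-ins.

#include <stdio.h>
#include <sys/epoll.h>

#define MAX_EVENTS 256

static void handle_event(struct epoll_event *ev)	/* hypothetical dispatcher */
{
	printf("event mask: 0x%x\n", ev->events);
}

static void defer_handler(void)		/* stands in for tcp_defer_handler() */
{
	printf("deferred work: flush buffers, check file pressure\n");
}

static void loop_once(int epollfd)
{
	struct epoll_event events[MAX_EVENTS];
	int i, nfds;

	nfds = epoll_wait(epollfd, events, MAX_EVENTS, 1000);
	for (i = 0; i < nfds; i++)
		handle_event(&events[i]);

	defer_handler();	/* runs once per wakeup, not per event */
}

int main(void)
{
	int epollfd = epoll_create1(0);

	loop_once(epollfd);
	return 0;
}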

tcp_splice.c

@@ -52,6 +52,7 @@
 #define TCP_SPLICE_MAX_CONNS	(128 * 1024)
 #define TCP_SPLICE_PIPE_POOL_SIZE	16
 #define REFILL_INTERVAL	1000	/* ms, refill pool of pipes */
+#define TCP_SPLICE_FILE_PRESSURE	30	/* % of c->nofile */
 
 /* From tcp.c */
 extern int init_sock_pool4	[TCP_SOCK_POOL_SIZE];
@@ -152,6 +153,7 @@ static void tcp_splice_conn_epoll_events(uint16_t events,
 	*b |= (events & SPLICE_B_OUT_WAIT) ? EPOLLOUT : 0;
 }
 
+static void tcp_splice_destroy(struct ctx *c, struct tcp_splice_conn *conn);
 static int tcp_splice_epoll_ctl(struct ctx *c, struct tcp_splice_conn *conn);
 
 /**
@@ -832,13 +834,9 @@ void tcp_splice_init(struct ctx *c)
  */
 void tcp_splice_timer(struct ctx *c, struct timespec *now)
 {
-	int i;
-
-	for (i = c->tcp.splice_conn_count - 1; i >= 0; i--) {
-		struct tcp_splice_conn *conn;
-
-		conn = CONN(i);
-
+	struct tcp_splice_conn *conn;
+
+	for (conn = CONN(c->tcp.splice_conn_count - 1); conn >= tc; conn--) {
 		if (conn->flags & SPLICE_CLOSING) {
 			tcp_splice_destroy(c, conn);
 			continue;
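The rewritten loop walks the connection table by pointer instead of by index. A minimal standalone sketch of the same pattern, assuming (as the conn >= tc comparison suggests) that CONN(i) resolves to &tc[i] in a static array tc:

#include <stdio.h>

struct conn {
	int flags;
};

static struct conn tc[8];	/* stands in for the table in tcp_splice.c */
#define CONN(i)	(&tc[(i)])	/* assumed mapping: index to entry */

int main(void)
{
	int count = 5;		/* stands in for c->tcp.splice_conn_count */
	struct conn *conn;

	/* Walk entries from the last active one down to the base of the
	 * table; decrementing the pointer avoids recomputing tc + i on
	 * every pass.
	 */
	for (conn = CONN(count - 1); conn >= tc; conn--)
		printf("visiting entry %td\n", conn - tc);

	return 0;
}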
@@ -865,3 +863,21 @@ void tcp_splice_timer(struct ctx *c, struct timespec *now)
 	if (timespec_diff_ms(now, &c->tcp.refill_ts) > REFILL_INTERVAL)
 		tcp_splice_pipe_refill(c);
 }
+
+/**
+ * tcp_splice_defer_handler() - Close connections without timer on file pressure
+ * @c:		Execution context
+ */
+void tcp_splice_defer_handler(struct ctx *c)
+{
+	int max_files = c->nofile / 100 * TCP_SPLICE_FILE_PRESSURE;
+	struct tcp_splice_conn *conn;
+
+	if (c->tcp.splice_conn_count * 6 < max_files)
+		return;
+
+	for (conn = CONN(c->tcp.splice_conn_count - 1); conn >= tc; conn--) {
+		if (conn->flags & SPLICE_CLOSING)
+			tcp_splice_destroy(c, conn);
+	}
+}
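A side note on max_files: the expression divides before multiplying, which keeps the intermediate value small at the cost of some truncation. That trade-off is this sketch's reading, not something stated in the commit; the two orderings differ slightly under integer arithmetic:

#include <stdio.h>

int main(void)
{
	int nofile = 1024;	/* example ulimit -n value */

	/* Divide first, as tcp_splice_defer_handler() does, then the
	 * other way around: the results differ due to truncation.
	 */
	printf("%d\n", nofile / 100 * 30);	/* 300 */
	printf("%d\n", nofile * 30 / 100);	/* 307 */
	return 0;
}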

tcp_splice.h

@@ -12,3 +12,4 @@ void tcp_sock_handler_splice(struct ctx *c, union epoll_ref ref,
 void tcp_splice_destroy(struct ctx *c, struct tcp_splice_conn *conn);
 void tcp_splice_init(struct ctx *c);
 void tcp_splice_timer(struct ctx *c, struct timespec *now);
+void tcp_splice_defer_handler(struct ctx *c);