// SPDX-License-Identifier: GPL-2.0-or-later
/* PASTA - Pack A Subtle Tap Abstraction
 * for network namespace/tap device mode
 *
 * tcp_splice.c - direct namespace forwarding for local connections
 *
 * Copyright (c) 2020-2022 Red Hat GmbH
 * Author: Stefano Brivio <sbrivio@redhat.com>
 */

/**
 * DOC: Theory of Operation
 *
 * For local traffic directed to TCP ports configured for direct
 * mapping between namespaces, packets are directly translated between
 * L4 sockets using a pair of splice() syscalls. These connections are
 * tracked by struct tcp_splice_conn entries in the @tc array, using
 * these events:
 *
 * - SPLICE_CONNECT: connection accepted, connecting to target
 * - SPLICE_ESTABLISHED: connection to target established
 * - A_OUT_WAIT: pipe to accepted socket full, wait for EPOLLOUT
 * - B_OUT_WAIT: pipe to target socket full, wait for EPOLLOUT
 * - A_FIN_RCVD: FIN (EPOLLRDHUP) seen from accepted socket
 * - B_FIN_RCVD: FIN (EPOLLRDHUP) seen from target socket
 * - A_FIN_SENT: FIN (write shutdown) sent to accepted socket
 * - B_FIN_SENT: FIN (write shutdown) sent to target socket
 *
 * #syscalls:pasta pipe2|pipe fcntl armv6l:fcntl64 armv7l:fcntl64 ppc64:fcntl64
 */
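
/*
 * Data path for an established spliced connection:
 *
 *   socket a --splice()--> pipe_a_b --splice()--> socket b
 *   socket b --splice()--> pipe_b_a --splice()--> socket a
 */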

#include <sched.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>

#include "util.h"
#include "passt.h"
#include "log.h"
#include "tcp_splice.h"
#include "siphash.h"
#include "inany.h"

#include "tcp_conn.h"

#define MAX_PIPE_SIZE			(8UL * 1024 * 1024)
#define TCP_SPLICE_PIPE_POOL_SIZE	16
#define TCP_SPLICE_CONN_PRESSURE	30	/* % of conn_count */
#define TCP_SPLICE_FILE_PRESSURE	30	/* % of c->nofile */

/* Pools for pre-opened sockets (in namespace) */
#define TCP_SOCK_POOL_TSH		16	/* Refill in ns if > x used */

static int ns_sock_pool4	[TCP_SOCK_POOL_SIZE];
static int ns_sock_pool6	[TCP_SOCK_POOL_SIZE];

/* Pool of pre-opened pipes */
static int splice_pipe_pool	[TCP_SPLICE_PIPE_POOL_SIZE][2][2];

#define CONN_V6(x)		(x->flags & SPLICE_V6)
#define CONN_V4(x)		(!CONN_V6(x))
#define CONN_HAS(conn, set)	((conn->events & (set)) == (set))
#define CONN(idx)		(&tc[(idx)].splice)
#define CONN_IDX(conn)		((union tcp_conn *)(conn) - tc)

/* Display strings for connection events */
static const char *tcp_splice_event_str[] __attribute__((__unused__)) = {
	"SPLICE_CONNECT", "SPLICE_ESTABLISHED", "A_OUT_WAIT", "B_OUT_WAIT",
	"A_FIN_RCVD", "B_FIN_RCVD", "A_FIN_SENT", "B_FIN_SENT",
};

/* Display strings for connection flags */
static const char *tcp_splice_flag_str[] __attribute__((__unused__)) = {
	"SPLICE_V6", "RCVLOWAT_SET_A", "RCVLOWAT_SET_B", "RCVLOWAT_ACT_A",
	"RCVLOWAT_ACT_B", "CLOSING",
};

/* Forward declaration */
static int tcp_sock_refill_ns(void *arg);

/**
 * tcp_splice_conn_epoll_events() - epoll events masks for given state
 * @events: Connection event flags
 * @a: Event mask for socket with accepted connection, set on return
 * @b: Event mask for connection target socket, set on return
 */
static void tcp_splice_conn_epoll_events(uint16_t events,
					 uint32_t *a, uint32_t *b)
{
	*a = *b = 0;
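
	/* Established: poll a side for input unless we already sent a FIN to
	 * the other side; connecting: wait for EPOLLOUT on the target only */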
	if (events & SPLICE_ESTABLISHED) {
		if (!(events & B_FIN_SENT))
			*a = EPOLLIN | EPOLLRDHUP;
		if (!(events & A_FIN_SENT))
			*b = EPOLLIN | EPOLLRDHUP;
	} else if (events & SPLICE_CONNECT) {
		*b = EPOLLOUT;
	}

	*a |= (events & A_OUT_WAIT) ? EPOLLOUT : 0;
	*b |= (events & B_OUT_WAIT) ? EPOLLOUT : 0;
}

static int tcp_splice_epoll_ctl(const struct ctx *c,
				struct tcp_splice_conn *conn);

/**
 * conn_flag_do() - Set/unset given flag, log, update epoll on CLOSING flag
 * @c: Execution context
 * @conn: Connection pointer
 * @flag: Flag to set, or ~flag to unset
 */
static void conn_flag_do(const struct ctx *c, struct tcp_splice_conn *conn,
			 unsigned long flag)
{
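	/* A value with a single bit set means "set this flag"; a value with
	 * multiple bits set is the negated (~flag) form and clears it */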
	if (flag & (flag - 1)) {
		int flag_index = fls(~flag);

		if (!(conn->flags & ~flag))
			return;

		conn->flags &= flag;
		if (flag_index >= 0) {
			debug("TCP (spliced): index %li: %s dropped", CONN_IDX(conn),
			      tcp_splice_flag_str[flag_index]);
		}
	} else {
		int flag_index = fls(flag);

		if (conn->flags & flag)
			return;

		conn->flags |= flag;
		if (flag_index >= 0) {
			debug("TCP (spliced): index %li: %s", CONN_IDX(conn),
			      tcp_splice_flag_str[flag_index]);
		}
	}

	if (flag == CLOSING) {
		epoll_ctl(c->epollfd, EPOLL_CTL_DEL, conn->a, NULL);
		epoll_ctl(c->epollfd, EPOLL_CTL_DEL, conn->b, NULL);
	}
}

#define conn_flag(c, conn, flag)					\
	do {								\
		trace("TCP (spliced): flag at %s:%i",			\
		      __func__, __LINE__);				\
		conn_flag_do(c, conn, flag);				\
	} while (0)

/**
 * tcp_splice_epoll_ctl() - Add/modify/delete epoll state from connection events
 * @c: Execution context
 * @conn: Connection pointer
 *
 * Return: 0 on success, negative error code on failure (not on deletion)
 */
static int tcp_splice_epoll_ctl(const struct ctx *c,
				struct tcp_splice_conn *conn)
{
	int m = conn->in_epoll ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
	union epoll_ref ref_a = { .type = EPOLL_TYPE_TCP, .fd = conn->a,
				  .tcp.index = CONN_IDX(conn) };
	union epoll_ref ref_b = { .type = EPOLL_TYPE_TCP, .fd = conn->b,
				  .tcp.index = CONN_IDX(conn) };
	struct epoll_event ev_a = { .data.u64 = ref_a.u64 };
	struct epoll_event ev_b = { .data.u64 = ref_b.u64 };
	uint32_t events_a, events_b;

	tcp_splice_conn_epoll_events(conn->events, &events_a, &events_b);
	ev_a.events = events_a;
	ev_b.events = events_b;

	if (epoll_ctl(c->epollfd, m, conn->a, &ev_a) ||
	    epoll_ctl(c->epollfd, m, conn->b, &ev_b)) {
		int ret = -errno;
		err("TCP (spliced): index %li, ERROR on epoll_ctl(): %s",
		    CONN_IDX(conn), strerror(errno));
		return ret;
	}

	conn->in_epoll = true;

	return 0;
}

/**
 * conn_event_do() - Set and log connection events, update epoll state
 * @c: Execution context
 * @conn: Connection pointer
 * @event: Connection event
 */
static void conn_event_do(const struct ctx *c, struct tcp_splice_conn *conn,
			  unsigned long event)
{
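	/* Same convention as conn_flag_do(): a single bit sets an event, the
	 * negated (~event) form clears it; the epoll masks are then updated */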
	if (event & (event - 1)) {
		int flag_index = fls(~event);

		if (!(conn->events & ~event))
			return;

		conn->events &= event;
		if (flag_index >= 0) {
			debug("TCP (spliced): index %li, ~%s", CONN_IDX(conn),
			      tcp_splice_event_str[flag_index]);
		}
	} else {
		int flag_index = fls(event);

		if (conn->events & event)
			return;

		conn->events |= event;
		if (flag_index >= 0) {
			debug("TCP (spliced): index %li, %s", CONN_IDX(conn),
			      tcp_splice_event_str[flag_index]);
		}
	}

	if (tcp_splice_epoll_ctl(c, conn))
		conn_flag(c, conn, CLOSING);
}

#define conn_event(c, conn, event)					\
	do {								\
		trace("TCP (spliced): event at %s:%i",			\
		      __func__, __LINE__);				\
		conn_event_do(c, conn, event);				\
	} while (0)

/**
 * tcp_splice_conn_update() - Update tcp_splice_conn when being moved in the table
 * @c: Execution context
 * @new: New location of tcp_splice_conn
 */
void tcp_splice_conn_update(const struct ctx *c, struct tcp_splice_conn *new)
{
	if (tcp_splice_epoll_ctl(c, new))
		conn_flag(c, new, CLOSING);
}

/**
 * tcp_splice_destroy() - Close spliced connection and pipes, clear
 * @c: Execution context
 * @conn_union: Spliced connection (container union)
 */
void tcp_splice_destroy(struct ctx *c, union tcp_conn *conn_union)
{
	struct tcp_splice_conn *conn = &conn_union->splice;

	if (conn->events & SPLICE_ESTABLISHED) {
		/* Flushing might need to block: don't recycle them. */
		if (conn->pipe_a_b[0] != -1) {
			close(conn->pipe_a_b[0]);
			close(conn->pipe_a_b[1]);
			conn->pipe_a_b[0] = conn->pipe_a_b[1] = -1;
		}
		if (conn->pipe_b_a[0] != -1) {
			close(conn->pipe_b_a[0]);
			close(conn->pipe_b_a[1]);
			conn->pipe_b_a[0] = conn->pipe_b_a[1] = -1;
		}
	}

	if (conn->events & SPLICE_CONNECT) {
		close(conn->b);
		conn->b = -1;
	}

	close(conn->a);
	conn->a = -1;
	conn->a_read = conn->a_written = conn->b_read = conn->b_written = 0;

	conn->events = SPLICE_CLOSED;
	conn->flags = 0;
	debug("TCP (spliced): index %li, CLOSED", CONN_IDX(conn));

	tcp_table_compact(c, conn_union);
}

/**
 * tcp_splice_connect_finish() - Completion of connect() or call on success
 * @c: Execution context
 * @conn: Connection pointer
 *
 * Return: 0 on success, -EIO on failure
 */
static int tcp_splice_connect_finish(const struct ctx *c,
				     struct tcp_splice_conn *conn)
{
	int i;

	conn->pipe_a_b[0] = conn->pipe_b_a[0] = -1;
	conn->pipe_a_b[1] = conn->pipe_b_a[1] = -1;
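
	/* Prefer a pair of pre-opened pipes from the pool, to keep pipe2()
	 * calls off the connection setup path */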
	for (i = 0; i < TCP_SPLICE_PIPE_POOL_SIZE; i++) {
		if (splice_pipe_pool[i][0][0] >= 0) {
			SWAP(conn->pipe_a_b[0], splice_pipe_pool[i][0][0]);
			SWAP(conn->pipe_a_b[1], splice_pipe_pool[i][0][1]);

			SWAP(conn->pipe_b_a[0], splice_pipe_pool[i][1][0]);
			SWAP(conn->pipe_b_a[1], splice_pipe_pool[i][1][1]);
			break;
		}
	}

	if (conn->pipe_a_b[0] < 0) {
		if (pipe2(conn->pipe_a_b, O_NONBLOCK | O_CLOEXEC) ||
		    pipe2(conn->pipe_b_a, O_NONBLOCK | O_CLOEXEC)) {
			conn_flag(c, conn, CLOSING);
			return -EIO;
		}

		if (fcntl(conn->pipe_a_b[0], F_SETPIPE_SZ, c->tcp.pipe_size)) {
			trace("TCP (spliced): cannot set a->b pipe size to %lu",
			      c->tcp.pipe_size);
		}

		if (fcntl(conn->pipe_b_a[0], F_SETPIPE_SZ, c->tcp.pipe_size)) {
			trace("TCP (spliced): cannot set b->a pipe size to %lu",
			      c->tcp.pipe_size);
		}
	}

	if (!(conn->events & SPLICE_ESTABLISHED))
		conn_event(c, conn, SPLICE_ESTABLISHED);

	return 0;
}

/**
 * tcp_splice_connect() - Create and connect socket for new spliced connection
 * @c: Execution context
 * @conn: Connection pointer
 * @sock_conn: Socket for the target side of the connection
 * @port: Destination port, host order
 *
 * Return: 0 for connect() succeeded or in progress, negative value on error
 */
static int tcp_splice_connect(const struct ctx *c, struct tcp_splice_conn *conn,
			      int sock_conn, in_port_t port)
{
	struct sockaddr_in6 addr6 = {
		.sin6_family = AF_INET6,
		.sin6_port = htons(port),
		.sin6_addr = IN6ADDR_LOOPBACK_INIT,
	};
	struct sockaddr_in addr4 = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	const struct sockaddr *sa;
	socklen_t sl;

	conn->b = sock_conn;

	if (setsockopt(conn->b, SOL_TCP, TCP_QUICKACK,
		       &((int){ 1 }), sizeof(int))) {
		trace("TCP (spliced): failed to set TCP_QUICKACK on socket %i",
		      conn->b);
	}

	if (CONN_V6(conn)) {
		sa = (struct sockaddr *)&addr6;
		sl = sizeof(addr6);
	} else {
		sa = (struct sockaddr *)&addr4;
		sl = sizeof(addr4);
	}

	if (connect(conn->b, sa, sl)) {
		if (errno != EINPROGRESS) {
			int ret = -errno;

			close(sock_conn);
			return ret;
		}
		conn_event(c, conn, SPLICE_CONNECT);
	} else {
		conn_event(c, conn, SPLICE_ESTABLISHED);
		return tcp_splice_connect_finish(c, conn);
	}

	return 0;
}

/**
 * tcp_splice_new() - Handle new spliced connection
 * @c: Execution context
 * @conn: Connection pointer
 * @port: Destination port, host order
 * @pif: Originating pif of the splice
 *
 * Return: return code from connect()
 */
static int tcp_splice_new(const struct ctx *c, struct tcp_splice_conn *conn,
			  in_port_t port, uint8_t pif)
{
	int s = -1;

	/* If the pool is empty we take slightly different approaches
	 * for init or ns sockets. For init sockets we just open a
	 * new one without refilling the pool to keep latency down.
	 * For ns sockets, we're going to incur the latency of
	 * entering the ns anyway, so we might as well refill the
	 * pool.
	 */
	if (pif == PIF_SPLICE) {
		int *p = CONN_V6(conn) ? init_sock_pool6 : init_sock_pool4;
		int af = CONN_V6(conn) ? AF_INET6 : AF_INET;

		s = tcp_conn_pool_sock(p);
		if (s < 0)
			s = tcp_conn_new_sock(c, af);
	} else {
		int *p = CONN_V6(conn) ? ns_sock_pool6 : ns_sock_pool4;

		ASSERT(pif == PIF_HOST);

		/* If pool is empty, refill it first */
		if (p[TCP_SOCK_POOL_SIZE-1] < 0)
			NS_CALL(tcp_sock_refill_ns, c);

		s = tcp_conn_pool_sock(p);
	}

	if (s < 0) {
		warn("Couldn't open connectable socket for splice (%d)", s);
		return s;
	}

	return tcp_splice_connect(c, conn, s, port);
}

/**
 * tcp_splice_dir() - Set sockets/pipe pointers reflecting flow direction
 * @conn: Connection pointer
 * @ref_sock: Socket returned as reference from epoll
 * @reverse: Reverse direction: @ref_sock is used as destination
 * @from: Source socket pointer, set on return
 * @to: Destination socket pointer, set on return
 * @pipes: Pipe set, assigned on return
 */
static void tcp_splice_dir(struct tcp_splice_conn *conn, int ref_sock,
			   int reverse, int *from, int *to, int **pipes)
{
	if (!reverse) {
		*from = ref_sock;
		*to = (*from == conn->a) ? conn->b : conn->a;
	} else {
		*to = ref_sock;
		*from = (*to == conn->a) ? conn->b : conn->a;
	}

	*pipes = *from == conn->a ? conn->pipe_a_b : conn->pipe_b_a;
}

/**
 * tcp_splice_conn_from_sock() - Attempt to init state for a spliced connection
 * @c: Execution context
 * @ref: epoll reference of listening socket
 * @conn: connection structure to initialize
 * @s: Accepted socket
 * @sa: Peer address of connection
 *
 * Return: true if able to create a spliced connection, false otherwise
 * #syscalls:pasta setsockopt
 */
bool tcp_splice_conn_from_sock(const struct ctx *c,
			       union tcp_listen_epoll_ref ref,
			       struct tcp_splice_conn *conn, int s,
			       const struct sockaddr *sa)
{
	const struct in_addr *a4;
	union inany_addr aany;
	in_port_t port;

	ASSERT(c->mode == MODE_PASTA);

	inany_from_sockaddr(&aany, &port, sa);
	a4 = inany_v4(&aany);
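
	/* Only connections coming from loopback are eligible for splicing;
	 * anything else is left to the regular tap path */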
	if (a4) {
		if (!IN4_IS_ADDR_LOOPBACK(a4))
			return false;
		conn->flags = 0;
	} else {
		if (!IN6_IS_ADDR_LOOPBACK(&aany.a6))
			return false;
		conn->flags = SPLICE_V6;
	}

	if (setsockopt(s, SOL_TCP, TCP_QUICKACK, &((int){ 1 }), sizeof(int)))
		trace("TCP (spliced): failed to set TCP_QUICKACK on %i", s);

	conn->c.spliced = true;
	conn->a = s;

	if (tcp_splice_new(c, conn, ref.port, ref.pif))
		conn_flag(c, conn, CLOSING);

	return true;
}

/**
 * tcp_splice_sock_handler() - Handler for socket mapped to spliced connection
 * @c: Execution context
 * @conn: Connection state
 * @s: Socket fd on which an event has occurred
 * @events: epoll events bitmap
 *
 * #syscalls:pasta splice
 */
void tcp_splice_sock_handler(struct ctx *c, struct tcp_splice_conn *conn,
			     int s, uint32_t events)
{
	uint8_t lowat_set_flag, lowat_act_flag;
	int from, to, *pipes, eof, never_read;
	uint32_t *seq_read, *seq_write;

	if (conn->events == SPLICE_CLOSED)
		return;

	if (events & EPOLLERR)
		goto close;

	if (conn->events == SPLICE_CONNECT) {
		if (!(events & EPOLLOUT))
			goto close;
		if (tcp_splice_connect_finish(c, conn))
			goto close;
	}

	if (events & EPOLLOUT) {
		if (s == conn->a)
			conn_event(c, conn, ~A_OUT_WAIT);
		else
			conn_event(c, conn, ~B_OUT_WAIT);

		tcp_splice_dir(conn, s, 1, &from, &to, &pipes);
	} else {
		tcp_splice_dir(conn, s, 0, &from, &to, &pipes);
	}

	if (events & EPOLLRDHUP) {
		if (s == conn->a)
			conn_event(c, conn, A_FIN_RCVD);
		else
			conn_event(c, conn, B_FIN_RCVD);
	}

	if (events & EPOLLHUP) {
		if (s == conn->a)
			conn_event(c, conn, A_FIN_SENT); /* Fake, but implied */
		else
			conn_event(c, conn, B_FIN_SENT);
	}

swap:
	eof = 0;
	never_read = 1;

	if (from == conn->a) {
		seq_read = &conn->a_read;
		seq_write = &conn->a_written;
		lowat_set_flag = RCVLOWAT_SET_A;
		lowat_act_flag = RCVLOWAT_ACT_A;
	} else {
		seq_read = &conn->b_read;
		seq_write = &conn->b_written;
		lowat_set_flag = RCVLOWAT_SET_B;
		lowat_act_flag = RCVLOWAT_ACT_B;
	}
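
	/* Forwarding loop: drain @from into the pipe, then flush the pipe
	 * into @to, tracking byte counts on both sides */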
	while (1) {
		ssize_t readlen, to_write = 0, written;
		int more = 0;

retry:
		readlen = splice(from, NULL, pipes[1], NULL, c->tcp.pipe_size,
				 SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
		trace("TCP (spliced): %li from read-side call", readlen);
		if (readlen < 0) {
			if (errno == EINTR)
				goto retry;

			if (errno != EAGAIN)
				goto close;

			to_write = c->tcp.pipe_size;
		} else if (!readlen) {
			eof = 1;
			to_write = c->tcp.pipe_size;
		} else {
			never_read = 0;
			to_write += readlen;
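			/* Read nearly filled the pipe: more data is likely
			 * coming, so pass SPLICE_F_MORE on the write side */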
			if (readlen >= (long)c->tcp.pipe_size * 90 / 100)
				more = SPLICE_F_MORE;

			if (conn->flags & lowat_set_flag)
				conn_flag(c, conn, lowat_act_flag);
		}

eintr:
		written = splice(pipes[0], NULL, to, NULL, to_write,
				 SPLICE_F_MOVE | more | SPLICE_F_NONBLOCK);
		trace("TCP (spliced): %li from write-side call (passed %lu)",
		      written, to_write);

		/* Most common case: skip updating counters. */
		if (readlen > 0 && readlen == written) {
			if (readlen >= (long)c->tcp.pipe_size * 10 / 100)
				continue;
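
			/* Short transfer completed in full: consider raising
			 * SO_RCVLOWAT (to a quarter of the pipe size) before
			 * stopping, so that small reads wake us up less often */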
			if (conn->flags & lowat_set_flag &&
			    readlen > (long)c->tcp.pipe_size / 10) {
				int lowat = c->tcp.pipe_size / 4;

				setsockopt(from, SOL_SOCKET, SO_RCVLOWAT,
					   &lowat, sizeof(lowat));

				conn_flag(c, conn, lowat_set_flag);
				conn_flag(c, conn, lowat_act_flag);
			}

			break;
		}

		*seq_read += readlen > 0 ? readlen : 0;
		*seq_write += written > 0 ? written : 0;

		if (written < 0) {
			if (errno == EINTR)
				goto eintr;

			if (errno != EAGAIN)
				goto close;

			if (never_read)
				break;

			if (to == conn->a)
				conn_event(c, conn, A_OUT_WAIT);
			else
				conn_event(c, conn, B_OUT_WAIT);
			break;
		}

		if (never_read && written == (long)(c->tcp.pipe_size))
			goto retry;

		if (!never_read && written < to_write) {
			to_write -= written;
			goto retry;
		}

		if (eof)
			break;
	}
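
	/* Once everything read so far has been written through, propagate a
	 * received FIN to the other side with a write shutdown */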
	if ((conn->events & A_FIN_RCVD) && !(conn->events & B_FIN_SENT)) {
		if (*seq_read == *seq_write && eof) {
			shutdown(conn->b, SHUT_WR);
			conn_event(c, conn, B_FIN_SENT);
		}
	}

	if ((conn->events & B_FIN_RCVD) && !(conn->events & A_FIN_SENT)) {
		if (*seq_read == *seq_write && eof) {
			shutdown(conn->a, SHUT_WR);
			conn_event(c, conn, A_FIN_SENT);
		}
	}

	if (CONN_HAS(conn, A_FIN_SENT | B_FIN_SENT))
		goto close;
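
	/* Both EPOLLIN and EPOLLOUT reported: run the loop once more in the
	 * opposite direction */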
|
|
|
|
|
|
|
|
if ((events & (EPOLLIN | EPOLLOUT)) == (EPOLLIN | EPOLLOUT)) {
|
|
|
|
events = EPOLLIN;
|
|
|
|
|
|
|
|
SWAP(from, to);
|
|
|
|
if (pipes == conn->pipe_a_b)
|
|
|
|
pipes = conn->pipe_b_a;
|
|
|
|
else
|
|
|
|
pipes = conn->pipe_a_b;
|
|
|
|
|
|
|
|
goto swap;
|
|
|
|
}
|
|
|
|
|
treewide: Packet abstraction with mandatory boundary checks
Implement a packet abstraction providing boundary and size checks
based on packet descriptors: packets stored in a buffer can be queued
into a pool (without storage of its own), and data can be retrieved
referring to an index in the pool, specifying offset and length.
Checks ensure data is not read outside the boundaries of buffer and
descriptors, and that packets added to a pool are within the buffer
range with valid offset and indices.
This implies a wider rework: usage of the "queueing" part of the
abstraction mostly affects tap_handler_{passt,pasta}() functions and
their callees, while the "fetching" part affects all the guest or tap
facing implementations: TCP, UDP, ICMP, ARP, NDP, DHCP and DHCPv6
handlers.
Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
2022-03-25 13:02:47 +01:00
|
|
|
if (events & EPOLLHUP)
|
|
|
|
goto close;
|
|
|
|
|
2022-03-15 01:07:02 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
close:
|
treewide: Packet abstraction with mandatory boundary checks
Implement a packet abstraction providing boundary and size checks
based on packet descriptors: packets stored in a buffer can be queued
into a pool (without storage of its own), and data can be retrieved
referring to an index in the pool, specifying offset and length.
Checks ensure data is not read outside the boundaries of buffer and
descriptors, and that packets added to a pool are within the buffer
range with valid offset and indices.
This implies a wider rework: usage of the "queueing" part of the
abstraction mostly affects tap_handler_{passt,pasta}() functions and
their callees, while the "fetching" part affects all the guest or tap
facing implementations: TCP, UDP, ICMP, ARP, NDP, DHCP and DHCPv6
handlers.
Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
2022-03-25 13:02:47 +01:00
|
|
|
conn_flag(c, conn, CLOSING);
|
2022-03-15 01:07:02 +01:00
|
|
|
}
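For reference, the forwarding step driven by this handler amounts to moving bytes between two sockets through a pipe with a pair of splice() calls, so the payload never crosses into userspace. The sketch below, with the invented helper forward_once(), shows only that core pattern; the real handler additionally tracks read/written byte counters (seq_read/seq_write above) and the epoll event state, since the second splice() may move fewer bytes than the first:

/* Illustrative sketch only: single forwarding pass between two sockets */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

ssize_t forward_once(int from, int to, int pipe_fds[2], size_t chunk)
{
	ssize_t in, out;

	/* Socket -> pipe: data stays in the kernel */
	in = splice(from, NULL, pipe_fds[1], NULL, chunk,
		    SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
	if (in <= 0)
		return in;

	/* Pipe -> socket: may be short, leaving bytes queued in the pipe */
	out = splice(pipe_fds[0], NULL, to, NULL, (size_t)in,
		     SPLICE_F_MOVE | SPLICE_F_NONBLOCK);

	return out;
}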
/**
 * tcp_set_pipe_size() - Set usable pipe size, probe starting from MAX_PIPE_SIZE
 * @c:		Execution context
 */
static void tcp_set_pipe_size(struct ctx *c)
{
	int probe_pipe[TCP_SPLICE_PIPE_POOL_SIZE * 2][2], i, j;

	c->tcp.pipe_size = MAX_PIPE_SIZE;

smaller:
	for (i = 0; i < TCP_SPLICE_PIPE_POOL_SIZE * 2; i++) {
		if (pipe2(probe_pipe[i], O_CLOEXEC)) {
			i++;
			break;
		}

		if (fcntl(probe_pipe[i][0], F_SETPIPE_SZ, c->tcp.pipe_size) < 0)
			break;
	}

	for (j = i - 1; j >= 0; j--) {
		close(probe_pipe[j][0]);
		close(probe_pipe[j][1]);
	}

	if (i == TCP_SPLICE_PIPE_POOL_SIZE * 2)
		return;

	if (!(c->tcp.pipe_size /= 2)) {
		c->tcp.pipe_size = MAX_PIPE_SIZE;
		return;
	}

	goto smaller;
}
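The probe above leans on fcntl(F_SETPIPE_SZ) failing once /proc/sys/fs/pipe-max-size or the per-user pipe buffer limits (/proc/sys/fs/pipe-user-pages-soft and -hard) get in the way, and the kernel may round an accepted request up. A stand-alone way to check what a single pipe can actually be resized to, unrelated to passt itself, is to pair it with F_GETPIPE_SZ:

/* Illustrative only: report the pipe size the kernel actually grants */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2], size;

	if (pipe2(fds, O_CLOEXEC))
		return 1;

	/* Ask for 4 MiB; on failure keep whatever the default is */
	if (fcntl(fds[0], F_SETPIPE_SZ, 4 * 1024 * 1024) < 0)
		perror("F_SETPIPE_SZ");

	size = fcntl(fds[0], F_GETPIPE_SZ);
	printf("usable pipe size: %i bytes\n", size);

	close(fds[0]);
	close(fds[1]);

	return 0;
}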
/**
 * tcp_splice_pipe_refill() - Refill pool of pre-opened pipes
 * @c:		Execution context
 */
static void tcp_splice_pipe_refill(const struct ctx *c)
{
	int i;

	for (i = 0; i < TCP_SPLICE_PIPE_POOL_SIZE; i++) {
		if (splice_pipe_pool[i][0][0] >= 0)
			break;
		if (pipe2(splice_pipe_pool[i][0], O_NONBLOCK | O_CLOEXEC))
			continue;
		if (pipe2(splice_pipe_pool[i][1], O_NONBLOCK | O_CLOEXEC)) {
			/* Second pipe failed: close the first one, which was
			 * actually created, not the descriptors of the pipe
			 * that never came into existence
			 */
			close(splice_pipe_pool[i][0][0]);
			close(splice_pipe_pool[i][0][1]);
			continue;
		}

		if (fcntl(splice_pipe_pool[i][0][0], F_SETPIPE_SZ,
			  c->tcp.pipe_size)) {
			trace("TCP (spliced): cannot set a->b pipe size to %lu",
			      c->tcp.pipe_size);
		}

		if (fcntl(splice_pipe_pool[i][1][0], F_SETPIPE_SZ,
			  c->tcp.pipe_size)) {
			trace("TCP (spliced): cannot set b->a pipe size to %lu",
			      c->tcp.pipe_size);
		}
	}
}
/**
 * tcp_sock_refill_ns() - Refill pools of pre-opened sockets in namespace
 * @arg:	Execution context cast to void *
 *
 * Return: 0
 */
static int tcp_sock_refill_ns(void *arg)
{
	const struct ctx *c = (const struct ctx *)arg;

	ns_enter(c);

	if (c->ifi4)
		tcp_sock_refill_pool(c, ns_sock_pool4, AF_INET);
	if (c->ifi6)
		tcp_sock_refill_pool(c, ns_sock_pool6, AF_INET6);

	return 0;
}
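ns_enter() is implemented elsewhere in the tree; at its core, joining the target network namespace is a setns() call on a handle to that namespace. The sketch below, with the invented helper join_netns_of(), opens the handle via a /proc path purely for illustration; how passt actually obtains and keeps its namespace handle is not shown here:

/* Illustrative only: join the network namespace of process 'pid' */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int join_netns_of(int pid)
{
	char path[64];
	int fd, rc;

	snprintf(path, sizeof(path), "/proc/%i/ns/net", pid);

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	rc = setns(fd, CLONE_NEWNET);	/* 0 on success, -1 on failure */
	close(fd);

	return rc;
}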
/**
 * tcp_splice_refill() - Refill pools of resources needed for splicing
 * @c:		Execution context
 */
void tcp_splice_refill(const struct ctx *c)
{
	if ((c->ifi4 && ns_sock_pool4[TCP_SOCK_POOL_TSH] < 0) ||
	    (c->ifi6 && ns_sock_pool6[TCP_SOCK_POOL_TSH] < 0))
		NS_CALL(tcp_sock_refill_ns, c);

	tcp_splice_pipe_refill(c);
}
/**
 * tcp_splice_init() - Initialise pipe pool and size
 * @c:		Execution context
 */
void tcp_splice_init(struct ctx *c)
{
	memset(splice_pipe_pool, 0xff, sizeof(splice_pipe_pool));
	tcp_set_pipe_size(c);

	memset(&ns_sock_pool4, 0xff, sizeof(ns_sock_pool4));
	memset(&ns_sock_pool6, 0xff, sizeof(ns_sock_pool6));
	NS_CALL(tcp_sock_refill_ns, c);
}
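A note on the 0xff fill: with two's-complement integers, a byte pattern of all ones reads back as -1 in every int slot, which is exactly the "no file descriptor" sentinel that the refill paths test with >= 0. A trivial stand-alone check of that property:

/* Illustrative only: an all-ones fill marks every slot as "no fd" (-1) */
#include <assert.h>
#include <string.h>

int main(void)
{
	int fds[4][2];

	memset(fds, 0xff, sizeof(fds));
	assert(fds[0][0] == -1 && fds[3][1] == -1);

	return 0;
}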
/**
 * tcp_splice_timer() - Timer for spliced connections
 * @c:		Execution context
 * @conn_union:	Spliced connection (container union)
 */
void tcp_splice_timer(struct ctx *c, union tcp_conn *conn_union)
{
	struct tcp_splice_conn *conn = &conn_union->splice;

	if (conn->flags & CLOSING) {
		tcp_splice_destroy(c, conn_union);
		return;
	}

	if ( (conn->flags & RCVLOWAT_SET_A) &&
	    !(conn->flags & RCVLOWAT_ACT_A)) {
		if (setsockopt(conn->a, SOL_SOCKET, SO_RCVLOWAT,
			       &((int){ 1 }), sizeof(int))) {
			trace("TCP (spliced): can't set SO_RCVLOWAT on "
			      "%i", conn->a);
		}
		conn_flag(c, conn, ~RCVLOWAT_SET_A);
	}

	if ( (conn->flags & RCVLOWAT_SET_B) &&
	    !(conn->flags & RCVLOWAT_ACT_B)) {
		if (setsockopt(conn->b, SOL_SOCKET, SO_RCVLOWAT,
			       &((int){ 1 }), sizeof(int))) {
			trace("TCP (spliced): can't set SO_RCVLOWAT on "
			      "%i", conn->b);
		}
		conn_flag(c, conn, ~RCVLOWAT_SET_B);
	}

	conn_flag(c, conn, ~RCVLOWAT_ACT_A);
	conn_flag(c, conn, ~RCVLOWAT_ACT_B);
}
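SO_RCVLOWAT raises the number of bytes that must be queued before a socket is reported readable, which cuts down wakeups for trickling connections; resetting it to 1, as the timer does above, restores the default behaviour of waking up on any data. The invented helper set_rcvlowat() below shows the plain setsockopt()/getsockopt() usage outside of passt:

/* Illustrative only: set SO_RCVLOWAT on 'fd' and report the result */
#include <stdio.h>
#include <sys/socket.h>

int set_rcvlowat(int fd, int bytes)
{
	socklen_t len = sizeof(bytes);

	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, sizeof(bytes)))
		return -1;

	if (getsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, &len))
		return -1;

	printf("effective SO_RCVLOWAT: %i bytes\n", bytes);

	return 0;
}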