flow: Enforce that freeing of closed flows must happen in deferred handlers
Currently, flows are only ever finally freed (and the table compacted) from the deferred handlers. Some future optimisations to flow table management will rely on this, so enforce it: rather than having the TCP code call flow_table_compact() directly, add a boolean return value to the per-flow deferred handlers. If true, it indicates that the flow code itself should free the flow. This forces all freeing of flows to occur during the flow code's scan of the table in flow_defer_handler(), which opens possibilities for future optimisations.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
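The control flow this commit enforces, sketched below: the generic flow code scans the table, asks each per-protocol deferred handler whether its flow can go away, and only then frees the entry and compacts the table. This is a minimal sketch based on the commit message and the new prototypes; the table layout (flowtab, FLOW_MAX), the type tags (FLOW_TCP, FLOW_TCP_SPLICE) and the flow_defer_handler() signature are assumptions for illustration, not the verbatim passt code.

/* Sketch only: assumes the flow table is an array flowtab[FLOW_MAX] of
 * union flow and that each entry carries a type tag in flow->f.type.
 * Needs <stdbool.h> and passt's flow headers for the project types.
 */
void flow_defer_handler(struct ctx *c)
{
        union flow *flow;

        for (flow = flowtab; flow < flowtab + FLOW_MAX; flow++) {
                bool closed = false;

                switch (flow->f.type) {
                case FLOW_TCP:
                        closed = tcp_flow_defer(flow);
                        break;
                case FLOW_TCP_SPLICE:
                        closed = tcp_splice_flow_defer(flow);
                        break;
                default:
                        break;
                }

                /* Only the flow code frees entries and compacts the table;
                 * the per-protocol handlers just report what can be freed. */
                if (closed)
                        flow_table_compact(c, flow);
        }
}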
parent 4a849e9526
commit 9c0881d4f6
5 changed files with 21 additions and 15 deletions
@@ -158,8 +158,8 @@ extern int init_sock_pool6 [TCP_SOCK_POOL_SIZE];
 void tcp_tap_conn_update(const struct ctx *c, struct tcp_tap_conn *old,
                          struct tcp_tap_conn *new);
 void tcp_splice_conn_update(const struct ctx *c, struct tcp_splice_conn *new);
-void tcp_flow_defer(const struct ctx *c, union flow *flow);
-void tcp_splice_flow_defer(const struct ctx *c, union flow *flow);
+bool tcp_flow_defer(union flow *flow);
+bool tcp_splice_flow_defer(union flow *flow);
 void tcp_splice_timer(const struct ctx *c, union flow *flow);
 int tcp_conn_pool_sock(int pool[]);
 int tcp_conn_new_sock(const struct ctx *c, sa_family_t af);
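On the TCP side, the deferred handler now reports whether its flow can be freed instead of freeing it. A hedged sketch of what a bool-returning tcp_flow_defer() might look like; the field names (events, sock, timer) and the CLOSED check are assumptions about the TCP connection state, not the literal code from tcp.c:

/* Sketch only: needs <stdbool.h>, <unistd.h> and passt's tcp_conn headers. */
bool tcp_flow_defer(union flow *flow)
{
        struct tcp_tap_conn *conn = &flow->tcp;

        if (conn->events != CLOSED)
                return false;           /* connection still active: keep it */

        close(conn->sock);              /* release per-flow resources here */
        if (conn->timer != -1)
                close(conn->timer);

        return true;                    /* flow_defer_handler() frees the flow */
}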