
flow: Remove unneeded bound parameter from flow traversal macros

The foreach macros used to step through flows each take a 'bound' parameter
to only scan part of the flow table.  Only one place actually passes a
bound different from FLOW_MAX.  So we can simplify every other invocation
by having that one case manually handle the bound.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
commit b79a22d360
Author: David Gibson <david@gibson.dropbear.id.au>, 2025-02-19 13:28:35 +11:00
Committed by: Stefano Brivio <sbrivio@redhat.com>
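
To make the shape of the change concrete before the diff, here is a minimal, self-contained sketch of the pattern (illustrative names and a toy table only, not passt code): the traversal macro loses its bound parameter and always walks the whole table, while the one caller that still needs a bound checks it inside the loop and breaks, just as flow_migrate_source_rollback() does below with FLOW_IDX(flow) >= bound.

	#include <stdio.h>

	#define TABLE_MAX 8			/* stand-in for FLOW_MAX */

	static int table[TABLE_MAX] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* After the change: no bound parameter, always scan the full table */
	#define foreach_entry(i)		\
		for ((i) = 0; (i) < TABLE_MAX; (i)++)

	/* The single bounded user now handles its bound manually */
	static void print_first(unsigned bound)
	{
		unsigned i;

		foreach_entry(i) {
			if (i >= bound)
				break;
			printf("%d\n", table[i]);
		}
	}

	int main(void)
	{
		print_first(3);		/* prints 1, 2 and 3 */
		return 0;
	}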

 flow.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
@@ -53,28 +53,28 @@ const uint8_t flow_proto[] = {
 static_assert(ARRAY_SIZE(flow_proto) == FLOW_NUM_TYPES,
	       "flow_proto[] doesn't match enum flow_type");
 
-#define foreach_flow(flow, bound)					\
-	for ((flow) = flowtab; FLOW_IDX(flow) < (bound); (flow)++)	\
+#define foreach_flow(flow)						\
+	for ((flow) = flowtab; FLOW_IDX(flow) < FLOW_MAX; (flow)++)	\
 		if ((flow)->f.state == FLOW_STATE_FREE)			\
 			(flow) += (flow)->free.n - 1;			\
 		else
 
-#define foreach_active_flow(flow, bound)				\
-	foreach_flow((flow), (bound))					\
+#define foreach_active_flow(flow)					\
+	foreach_flow((flow))						\
 		if ((flow)->f.state != FLOW_STATE_ACTIVE)		\
 			/* NOLINTNEXTLINE(bugprone-branch-clone) */	\
 			continue;					\
 		else
 
-#define foreach_tcp_flow(flow, bound)					\
-	foreach_active_flow((flow), (bound))				\
+#define foreach_tcp_flow(flow)						\
+	foreach_active_flow((flow))					\
 		if ((flow)->f.type != FLOW_TCP)				\
 			/* NOLINTNEXTLINE(bugprone-branch-clone) */	\
 			continue;					\
 		else
 
-#define foreach_established_tcp_flow(flow, bound)			\
-	foreach_tcp_flow((flow), (bound))				\
+#define foreach_established_tcp_flow(flow)				\
+	foreach_tcp_flow((flow))					\
 		if (!tcp_flow_is_established(&(flow)->tcp))		\
 			/* NOLINTNEXTLINE(bugprone-branch-clone) */	\
 			continue;					\
@@ -907,21 +907,23 @@ void flow_defer_handler(const struct ctx *c, const struct timespec *now)
 /**
  * flow_migrate_source_rollback() - Disable repair mode, return failure
  * @c:		Execution context
- * @max_flow:	Maximum index of affected flows
+ * @bound:	No need to roll back flow indices >= @bound
  * @ret:	Negative error code
  *
  * Return: @ret
  */
-static int flow_migrate_source_rollback(struct ctx *c, unsigned max_flow,
-					int ret)
+static int flow_migrate_source_rollback(struct ctx *c, unsigned bound, int ret)
 {
 	union flow *flow;
 
 	debug("...roll back migration");
 
-	foreach_established_tcp_flow(flow, max_flow)
+	foreach_established_tcp_flow(flow) {
+		if (FLOW_IDX(flow) >= bound)
+			break;
 		if (tcp_flow_repair_off(c, &flow->tcp))
 			die("Failed to roll back TCP_REPAIR mode");
+	}
 
 	if (repair_flush(c))
 		die("Failed to roll back TCP_REPAIR mode");
@@ -941,7 +943,7 @@ static int flow_migrate_repair_all(struct ctx *c, bool enable)
 	union flow *flow;
 	int rc;
 
-	foreach_established_tcp_flow(flow, FLOW_MAX) {
+	foreach_established_tcp_flow(flow) {
 		if (enable)
 			rc = tcp_flow_repair_on(c, &flow->tcp);
 		else
@@ -1005,7 +1007,7 @@ int flow_migrate_source(struct ctx *c, const struct migrate_stage *stage,
 	(void)c;
 	(void)stage;
 
-	foreach_established_tcp_flow(flow, FLOW_MAX)
+	foreach_established_tcp_flow(flow)
 		count++;
 
 	count = htonl(count);
@@ -1024,7 +1026,7 @@ int flow_migrate_source(struct ctx *c, const struct migrate_stage *stage,
 	 * stream might now be inconsistent, and we might have closed listening
 	 * TCP sockets, so just terminate.
 	 */
-	foreach_established_tcp_flow(flow, FLOW_MAX) {
+	foreach_established_tcp_flow(flow) {
 		rc = tcp_flow_migrate_source(fd, &flow->tcp);
 		if (rc) {
 			err("Can't send data, flow %u: %s", FLOW_IDX(flow),
@@ -1051,7 +1053,7 @@ int flow_migrate_source(struct ctx *c, const struct migrate_stage *stage,
 	 * failures but not if the stream might be inconsistent (reported here
 	 * as EIO).
 	 */
-	foreach_established_tcp_flow(flow, FLOW_MAX) {
+	foreach_established_tcp_flow(flow) {
 		rc = tcp_flow_migrate_source_ext(fd, &flow->tcp);
 		if (rc) {
 			err("Extended data for flow %u: %s", FLOW_IDX(flow),
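
As a usage note on the macros touched in the first hunk: they rely on ending in a bare else, so the statement or block the caller writes after the macro becomes the else branch, i.e. the effective loop body. A hedged, self-contained sketch of that trick with simplified stand-in types (not passt code; the real foreach_flow() skips whole runs of free entries rather than one at a time):

	#include <stdio.h>

	struct entry {
		int free;			/* stand-in for FLOW_STATE_FREE */
		int value;
	};

	#define ENTRIES_MAX 4

	static struct entry entries[ENTRIES_MAX] = {
		{ .free = 0, .value = 1 },
		{ .free = 1, .value = 0 },
		{ .free = 0, .value = 3 },
		{ .free = 0, .value = 4 },
	};

	/* Ending in "else" makes the caller's statement run for used entries only */
	#define foreach_used_entry(e)					\
		for ((e) = entries; (e) < entries + ENTRIES_MAX; (e)++)	\
			if ((e)->free)					\
				;					\
			else

	int main(void)
	{
		struct entry *e;

		foreach_used_entry(e)
			printf("%d\n", e->value);	/* prints 1, 3 and 4 */

		return 0;
	}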