vhost-user: use guest buffer directly in vu_handle_tx()
Check that the buffer address is correctly within the mmap'ed memory.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
parent 37f457a76c
commit 1bf4abe402

7 changed files with 85 additions and 73 deletions
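The core of the change is the new vu_packet_check_range() (see the vhost_user.c hunk below): a packet pointer handed to the pool is accepted only if its whole range falls inside one of the mmap'ed vhost-user memory regions. The following is a minimal, self-contained sketch of that containment test, not passt code: struct region, check_range(), table and guest_mem are illustrative stand-ins (struct region carries only the VuDevRegion fields the check reads).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region {			/* simplified stand-in for VuDevRegion */
	uint64_t mmap_addr;	/* start of the mmap'ed area; 0 terminates the table */
	uint64_t mmap_offset;	/* offset of guest memory within that area */
	uint64_t size;		/* size of the guest memory region */
};

/* Return 0 if [start + offset, start + offset + len) lies inside one of the
 * mapped regions, -1 otherwise -- the same contract as vu_packet_check_range().
 */
static int check_range(const struct region *r, const char *start,
		       size_t offset, size_t len)
{
	for (; r->mmap_addr; r++) {
		if ((char *)(uintptr_t)r->mmap_addr <= start &&
		    start + offset + len < (char *)(uintptr_t)r->mmap_addr +
					   r->mmap_offset + r->size)
			return 0;
	}

	return -1;
}

int main(void)
{
	static char guest_mem[4096];		/* pretend this is a mapped guest region */
	const struct region table[] = {
		{ (uint64_t)(uintptr_t)guest_mem, 0, sizeof(guest_mem) },
		{ 0, 0, 0 },			/* mmap_addr == 0 marks the end of the table */
	};

	printf("inside:  %d\n", check_range(table, guest_mem + 16, 0, 64));	/* prints 0 */
	printf("outside: %d\n", check_range(table, guest_mem + 16, 0, 8192));	/* prints -1 */

	return 0;
}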
iov.c (39 changed lines)

@@ -156,42 +156,3 @@ size_t iov_size(const struct iovec *iov, size_t iov_cnt)
 	return len;
 }
-
-/**
- * iov_copy - Copy data from one scatter/gather I/O vector (struct iovec) to
- *            another.
- *
- * @dst_iov:     Pointer to the destination array of struct iovec describing
- *               the scatter/gather I/O vector to copy to.
- * @dst_iov_cnt: Number of elements in the destination iov array.
- * @iov:         Pointer to the source array of struct iovec describing
- *               the scatter/gather I/O vector to copy from.
- * @iov_cnt:     Number of elements in the source iov array.
- * @offset:      Offset within the source iov from where copying should start.
- * @bytes:       Total number of bytes to copy from iov to dst_iov.
- *
- * Returns: The number of elements successfully copied to the destination
- *          iov array.
- */
-/* cppcheck-suppress unusedFunction */
-unsigned iov_copy(struct iovec *dst_iov, size_t dst_iov_cnt,
-		  const struct iovec *iov, size_t iov_cnt,
-		  size_t offset, size_t bytes)
-{
-	unsigned int i, j;
-
-	i = iov_skip_bytes(iov, iov_cnt, offset, &offset);
-
-	/* copying data */
-	for (j = 0; i < iov_cnt && j < dst_iov_cnt && bytes; i++) {
-		size_t len = MIN(bytes, iov[i].iov_len - offset);
-
-		dst_iov[j].iov_base = (char *)iov[i].iov_base + offset;
-		dst_iov[j].iov_len = len;
-		j++;
-		bytes -= len;
-		offset = 0;
-	}
-
-	return j;
-}
iov.h (3 changed lines)

@@ -25,7 +25,4 @@ size_t iov_from_buf(const struct iovec *iov, size_t iov_cnt,
 size_t iov_to_buf(const struct iovec *iov, size_t iov_cnt,
 		  size_t offset, void *buf, size_t bytes);
 size_t iov_size(const struct iovec *iov, size_t iov_cnt);
-unsigned iov_copy(struct iovec *dst_iov, size_t dst_iov_cnt,
-		  const struct iovec *iov, size_t iov_cnt,
-		  size_t offset, size_t bytes);
 #endif /* IOVEC_H */
packet.c (6 changed lines)

@@ -25,6 +25,12 @@
 static int packet_check_range(const struct pool *p, size_t offset, size_t len,
 			      const char *start, const char *func, int line)
 {
+	ASSERT(p->buf);
+
+	if (p->buf_size == 0)
+		return vu_packet_check_range((void *)p->buf, offset, len, start,
+					     func, line);
+
 	if (start < p->buf) {
 		if (func) {
 			trace("add packet start %p before buffer start %p, "
packet.h (2 changed lines)

@@ -22,6 +22,8 @@ struct pool {
 	struct iovec pkt[1];
 };

+int vu_packet_check_range(void *buf, size_t offset, size_t len,
+			  const char *start, const char *func, int line);
 void packet_add_do(struct pool *p, size_t len, const char *start,
 		   const char *func, int line);
 void *packet_get_do(const struct pool *p, const size_t idx,
tap.c (39 changed lines)

@@ -707,7 +707,7 @@ resume:
 		if (!eh)
 			continue;
 		if (ntohs(eh->h_proto) == ETH_P_ARP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);

 			packet_add(pkt, l2_len, (char *)eh);
 			arp(c, pkt);
@@ -747,7 +747,7 @@ resume:
 			continue;

 		if (iph->protocol == IPPROTO_ICMP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);

 			if (c->no_icmp)
 				continue;
@@ -766,7 +766,7 @@ resume:
 			continue;

 		if (iph->protocol == IPPROTO_UDP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);

 			packet_add(pkt, l2_len, (char *)eh);
 			if (dhcp(c, pkt))
@@ -915,7 +915,7 @@ resume:
 		}

 		if (proto == IPPROTO_ICMPV6) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);

 			if (c->no_icmp)
 				continue;
@@ -939,7 +939,7 @@ resume:
 		uh = (struct udphdr *)l4h;

 		if (proto == IPPROTO_UDP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);

 			packet_add(pkt, l4_len, l4h);

@@ -1391,6 +1391,23 @@ static void tap_sock_tun_init(struct ctx *c)
 	epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap, &ev);
 }

+void tap_sock_update_buf(void *base, size_t size)
+{
+	int i;
+
+	pool_tap4_storage.buf = base;
+	pool_tap4_storage.buf_size = size;
+	pool_tap6_storage.buf = base;
+	pool_tap6_storage.buf_size = size;
+
+	for (i = 0; i < TAP_SEQS; i++) {
+		tap4_l4[i].p.buf = base;
+		tap4_l4[i].p.buf_size = size;
+		tap6_l4[i].p.buf = base;
+		tap6_l4[i].p.buf_size = size;
+	}
+}
+
 /**
  * tap_sock_init() - Create and set up AF_UNIX socket or tuntap file descriptor
  * @c:		Execution context
@@ -1402,10 +1419,22 @@ void tap_sock_init(struct ctx *c)

 	pool_tap4_storage = PACKET_INIT(pool_tap4, TAP_MSGS, pkt_buf, sz);
 	pool_tap6_storage = PACKET_INIT(pool_tap6, TAP_MSGS, pkt_buf, sz);
+	if (c->mode == MODE_VU) {
+		pool_tap4_storage.buf = NULL;
+		pool_tap4_storage.buf_size = 0;
+		pool_tap6_storage.buf = NULL;
+		pool_tap6_storage.buf_size = 0;
+	}

 	for (i = 0; i < TAP_SEQS; i++) {
 		tap4_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
 		tap6_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
+		if (c->mode == MODE_VU) {
+			tap4_l4[i].p.buf = NULL;
+			tap4_l4[i].p.buf_size = 0;
+			tap6_l4[i].p.buf = NULL;
+			tap6_l4[i].p.buf_size = 0;
+		}
 	}

 	if (c->fd_tap != -1) { /* Passed as --fd */
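Note on the tap.c changes: the per-protocol PACKET_POOL_P() instances now take the parent pool's in->buf_size instead of sizeof(pkt_buf), so they stay correct when the parent pool no longer wraps pkt_buf. The new tap_sock_update_buf() lets a backend repoint every tap pool at a different buffer; the vhost-user code in the hunks below calls it as tap_sock_update_buf(vdev->regions, 0) after mapping guest memory, and that buf_size of 0 is what makes packet_check_range() defer to vu_packet_check_range() instead of checking against a flat buffer.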
tap.h (1 changed line)

@@ -98,6 +98,7 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
 void tap_handler_passt(struct ctx *c, uint32_t events,
 		       const struct timespec *now);
 void tap_sock_reset(struct ctx *c);
+void tap_sock_update_buf(void *base, size_t size);
 void tap_sock_init(struct ctx *c);
 void pool_flush_all(void);
 void tap_handler_all(struct ctx *c, const struct timespec *now);
|
68
vhost_user.c
68
vhost_user.c
|
@ -334,6 +334,25 @@ static bool map_ring(VuDev *vdev, VuVirtq *vq)
|
|||
return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
|
||||
}
|
||||
|
||||
int vu_packet_check_range(void *buf, size_t offset, size_t len, const char *start,
|
||||
const char *func, int line)
|
||||
{
|
||||
VuDevRegion *dev_region;
|
||||
|
||||
for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
|
||||
if ((char *)dev_region->mmap_addr <= start &&
|
||||
start + offset + len < (char *)dev_region->mmap_addr +
|
||||
dev_region->mmap_offset +
|
||||
dev_region->size)
|
||||
return 0;
|
||||
}
|
||||
if (func) {
|
||||
trace("cannot find region, %s:%i", func, line);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* #syscalls:passt mmap munmap
|
||||
*/
|
||||
|
@ -400,6 +419,12 @@ static bool vu_set_mem_table_exec(VuDev *vdev,
|
|||
}
|
||||
}
|
||||
|
||||
/* XXX */
|
||||
ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
|
||||
vdev->regions[vdev->nregions].mmap_addr = 0; /* mark EOF for vu_packet_check_range() */
|
||||
|
||||
tap_sock_update_buf(vdev->regions, 0);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -650,8 +675,8 @@ static void vu_handle_tx(VuDev *vdev, int index)
|
|||
VuVirtq *vq = &vdev->vq[index];
|
||||
int hdrlen = vdev->hdrlen;
|
||||
struct timespec now;
|
||||
char *p;
|
||||
size_t n;
|
||||
unsigned int indexes[VIRTQUEUE_MAX_SIZE];
|
||||
int count;
|
||||
|
||||
if (index % 2 != VHOST_USER_TX_QUEUE) {
|
||||
debug("index %d is not an TX queue", index);
|
||||
|
@ -660,14 +685,11 @@ static void vu_handle_tx(VuDev *vdev, int index)
|
|||
|
||||
clock_gettime(CLOCK_MONOTONIC, &now);
|
||||
|
||||
p = pkt_buf;
|
||||
|
||||
pool_flush_all();
|
||||
|
||||
count = 0;
|
||||
while (1) {
|
||||
VuVirtqElement *elem;
|
||||
unsigned int out_num;
|
||||
struct iovec sg[VIRTQUEUE_MAX_SIZE], *out_sg;
|
||||
|
||||
ASSERT(index == VHOST_USER_TX_QUEUE);
|
||||
elem = vu_queue_pop(vdev, vq, sizeof(VuVirtqElement), buffer[index]);
|
||||
|
@ -675,32 +697,26 @@ static void vu_handle_tx(VuDev *vdev, int index)
|
|||
break;
|
||||
}
|
||||
|
||||
out_num = elem->out_num;
|
||||
out_sg = elem->out_sg;
|
||||
if (out_num < 1) {
|
||||
if (elem->out_num < 1) {
|
||||
debug("virtio-net header not in first element");
|
||||
break;
|
||||
}
|
||||
ASSERT(elem->out_num == 1);
|
||||
|
||||
if (hdrlen) {
|
||||
unsigned sg_num;
|
||||
|
||||
sg_num = iov_copy(sg, ARRAY_SIZE(sg), out_sg, out_num,
|
||||
hdrlen, -1);
|
||||
out_num = sg_num;
|
||||
out_sg = sg;
|
||||
}
|
||||
|
||||
n = iov_to_buf(out_sg, out_num, 0, p, TAP_BUF_FILL);
|
||||
|
||||
packet_add_all(c, n, p);
|
||||
|
||||
p += n;
|
||||
|
||||
vu_queue_push(vdev, vq, elem, 0);
|
||||
vu_queue_notify(vdev, vq);
|
||||
packet_add_all(c, elem->out_sg[0].iov_len - hdrlen,
|
||||
(char *)elem->out_sg[0].iov_base + hdrlen);
|
||||
indexes[count] = elem->index;
|
||||
count++;
|
||||
}
|
||||
tap_handler_all(c, &now);
|
||||
|
||||
if (count) {
|
||||
int i;
|
||||
for (i = 0; i < count; i++)
|
||||
vu_queue_fill_by_index(vdev, vq, indexes[i], 0, i);
|
||||
vu_queue_flush(vdev, vq, count);
|
||||
vu_queue_notify(vdev, vq);
|
||||
}
|
||||
}
|
||||
|
||||
void vu_kick_cb(struct ctx *c, union epoll_ref ref)
|
||||
|
|
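The reworked vu_handle_tx() also changes how TX buffers are returned to the guest: instead of a vu_queue_push() and vu_queue_notify() per element, it records each popped element's index, then fills and flushes the used ring in one pass and sends a single notification once tap_handler_all() has processed the whole batch. The sketch below illustrates that pattern only; the toy used ring and the queue_*() helpers are hypothetical stand-ins, not passt or libvhost-user API.

#include <stdio.h>

#define QUEUE_SIZE 8

static unsigned int used_ring[QUEUE_SIZE];	/* toy stand-in for a virtqueue used ring */
static unsigned int used_count;

/* Hypothetical stand-ins for vu_queue_fill_by_index(), vu_queue_flush(),
 * vu_queue_notify().
 */
static void queue_fill_by_index(unsigned int elem_index, unsigned int slot)
{
	used_ring[slot] = elem_index;		/* record the completed descriptor */
}

static void queue_flush(unsigned int count)
{
	used_count = count;			/* a real ring would also bump the used index */
}

static void queue_notify(void)
{
	printf("notify guest: %u buffers used\n", used_count);
}

int main(void)
{
	unsigned int indexes[QUEUE_SIZE];
	unsigned int count = 0, i;

	/* "Pop" three descriptors and use their data in place, deferring completion */
	for (i = 0; i < 3; i++)
		indexes[count++] = i;

	if (count) {
		for (i = 0; i < count; i++)
			queue_fill_by_index(indexes[i], i);
		queue_flush(count);		/* publish all completions at once */
		queue_notify();			/* single notification for the batch */
	}

	return 0;
}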