2020-09-17 23:02:40 +03:00
|
|
|
// Copyright (c) Vitaliy Filippov, 2019+
|
2021-02-06 01:26:07 +03:00
|
|
|
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
|
2020-09-17 23:02:40 +03:00
|
|
|
|
2020-10-06 02:35:11 +03:00
|
|
|
#include <stdlib.h>
|
2023-07-04 00:23:59 +03:00
|
|
|
#include <unistd.h>
|
2020-10-06 02:35:11 +03:00
|
|
|
|
|
|
|
#include <stdexcept>
|
|
|
|
|
2023-07-03 00:42:31 +03:00
|
|
|
#include <sys/eventfd.h>
|
|
|
|
|
2019-11-05 02:12:04 +03:00
|
|
|
#include "ringloop.h"
|
|
|
|
|
|
|
|
// Create an event loop over an io_uring with <qd> submission queue entries.
// Allocates one ring_data_t slot per SQE; throws std::runtime_error if the
// ring can't be initialized and std::bad_alloc if slot allocation fails.
ring_loop_t::ring_loop_t(int qd)
{
    int ret = io_uring_queue_init(qd, &ring, 0);
    if (ret < 0)
    {
        throw std::runtime_error(std::string("io_uring_queue_init: ") + strerror(-ret));
    }
    // The kernel may round the requested depth up - use the actual SQ size
    free_ring_data_ptr = *ring.sq.kring_entries;
    ring_datas = (struct ring_data_t*)calloc(free_ring_data_ptr, sizeof(ring_data_t));
    free_ring_data = (int*)malloc(sizeof(int) * free_ring_data_ptr);
    if (!ring_datas || !free_ring_data)
    {
        // The destructor won't run when a constructor throws, so release
        // everything acquired so far here to avoid leaking the ring and
        // the allocation that did succeed (free(NULL) is a no-op)
        free(ring_datas);
        free(free_ring_data);
        io_uring_queue_exit(&ring);
        throw std::bad_alloc();
    }
    // Initially every ring_data slot is free
    for (int i = 0; i < free_ring_data_ptr; i++)
    {
        free_ring_data[i] = i;
    }
}
|
|
|
|
|
|
|
|
// Tear down the loop: release ring_data slots, the io_uring itself and,
// if one was registered, the completion-notification eventfd.
ring_loop_t::~ring_loop_t()
{
    free(free_ring_data);
    free(ring_datas);
    io_uring_queue_exit(&ring);
    // "Not registered" is signalled by a negative value (register_eventfd()
    // and loop() both test `ring_eventfd >= 0`); the previous `if (ring_eventfd)`
    // wrongly called close(-1) when no eventfd was ever registered and would
    // also have leaked a legitimate fd 0
    if (ring_eventfd >= 0)
    {
        close(ring_eventfd);
    }
}
|
|
|
|
|
2020-03-04 21:00:15 +03:00
|
|
|
// Subscribe <consumer> to be polled on every loop() iteration.
// Idempotent: an already-registered consumer is first removed, so it is
// never invoked twice per iteration.
void ring_loop_t::register_consumer(ring_consumer_t *consumer)
{
    unregister_consumer(consumer);
    consumers.push_back(consumer);
}
|
|
|
|
|
2019-12-13 22:53:59 +03:00
|
|
|
// Request one more pass over all consumers: loop()'s do/while re-runs
// while this flag is set, even if no new completions arrived.
void ring_loop_t::wakeup()
{
    loop_again = true;
}
|
|
|
|
|
2020-03-04 21:00:15 +03:00
|
|
|
// Remove <consumer> from the polling list. At most one entry can match
// because register_consumer() never adds duplicates. No-op if absent.
void ring_loop_t::unregister_consumer(ring_consumer_t *consumer)
{
    // size_t index avoids the signed/unsigned comparison warning the old
    // `int i < consumers.size()` produced
    for (size_t i = 0; i < consumers.size(); i++)
    {
        if (consumers[i] == consumer)
        {
            // Single-element erase instead of the equivalent two-iterator
            // range erase(begin+i, begin+i+1)
            consumers.erase(consumers.begin()+i);
            break;
        }
    }
}
|
|
|
|
|
2019-11-18 14:08:11 +03:00
|
|
|
// One event-loop iteration: drain all pending io_uring completions,
// dispatch their callbacks, then poll every registered consumer,
// repeating the consumer pass while wakeup() keeps setting loop_again.
void ring_loop_t::loop()
{
    if (ring_eventfd >= 0)
    {
        // Reset eventfd counter
        uint64_t ctr = 0;
        int r = read(ring_eventfd, &ctr, 8);
        // EAGAIN (nonblocking fd, counter already zero) and EINTR are expected
        if (r < 0 && errno != EAGAIN && errno != EINTR)
        {
            fprintf(stderr, "Error resetting eventfd: %s\n", strerror(errno));
        }
    }
    struct io_uring_cqe *cqe;
    // Drain completions without blocking
    while (!io_uring_peek_cqe(&ring, &cqe))
    {
        // user_data was set to a ring_data_t* inside ring_datas[] at submit time
        struct ring_data_t *d = (struct ring_data_t*)cqe->user_data;
        if (d->callback)
        {
            // First free ring_data item, then call the callback
            // so it has at least 1 free slot for the next event
            // which is required for EPOLLET to function properly
            struct ring_data_t dl;
            dl.iov = d->iov;
            dl.res = cqe->res;
            // swap (not copy) moves the std::function out of the shared slot
            dl.callback.swap(d->callback);
            free_ring_data[free_ring_data_ptr++] = d - ring_datas;
            dl.callback(&dl);
        }
        else
        {
            fprintf(stderr, "Warning: empty callback in SQE\n");
            // Still return the slot to the free list
            free_ring_data[free_ring_data_ptr++] = d - ring_datas;
        }
        io_uring_cqe_seen(&ring, cqe);
    }
    do
    {
        loop_again = false;
        for (int i = 0; i < consumers.size(); i++)
        {
            consumers[i]->loop();
            if (immediate_queue.size())
            {
                // Swap into a second queue so callbacks that schedule new
                // immediates don't mutate the container being iterated
                immediate_queue2.swap(immediate_queue);
                for (auto & cb: immediate_queue2)
                    cb();
                immediate_queue2.clear();
            }
        }
    } while (loop_again); // re-run if wakeup() was called during the pass
}
|
2019-12-17 01:44:08 +03:00
|
|
|
|
|
|
|
// Snapshot the current submission queue tail so SQEs obtained after this
// point can be rolled back with restore() before they are submitted.
unsigned ring_loop_t::save()
{
    return ring.sq.sqe_tail;
}
|
|
|
|
|
|
|
|
// Roll back all not-yet-submitted SQEs obtained since the matching save():
// return their ring_data slots to the free list and rewind the SQ tail.
// Must only be called before io_uring_submit() for the affected SQEs.
void ring_loop_t::restore(unsigned sqe_tail)
{
    assert(ring.sq.sqe_tail >= sqe_tail);
    for (unsigned i = sqe_tail; i < ring.sq.sqe_tail; i++)
    {
        // Each pending SQE's user_data points into ring_datas[]; recover
        // the slot index by pointer arithmetic
        free_ring_data[free_ring_data_ptr++] = ((ring_data_t*)ring.sq.sqes[i & *ring.sq.kring_mask].user_data) - ring_datas;
    }
    ring.sq.sqe_tail = sqe_tail;
}
|
2022-01-31 02:17:54 +03:00
|
|
|
|
|
|
|
int ring_loop_t::sqes_left()
|
|
|
|
{
|
|
|
|
struct io_uring_sq *sq = &ring.sq;
|
|
|
|
unsigned int head = io_uring_smp_load_acquire(sq->khead);
|
|
|
|
unsigned int next = sq->sqe_tail + 1;
|
2022-02-01 02:50:16 +03:00
|
|
|
int left = *sq->kring_entries - (next - head);
|
|
|
|
if (left > free_ring_data_ptr)
|
|
|
|
{
|
|
|
|
// return min(sqes left, ring_datas left)
|
|
|
|
return free_ring_data_ptr;
|
|
|
|
}
|
|
|
|
return left;
|
2022-01-31 02:17:54 +03:00
|
|
|
}
|
2023-07-03 00:42:31 +03:00
|
|
|
|
|
|
|
int ring_loop_t::register_eventfd()
|
|
|
|
{
|
|
|
|
if (ring_eventfd >= 0)
|
|
|
|
{
|
|
|
|
return ring_eventfd;
|
|
|
|
}
|
|
|
|
ring_eventfd = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK);
|
|
|
|
if (ring_eventfd < 0)
|
|
|
|
{
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
int r = io_uring_register_eventfd(&ring, ring_eventfd);
|
|
|
|
if (r < 0)
|
|
|
|
{
|
|
|
|
close(ring_eventfd);
|
|
|
|
ring_eventfd = -1;
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
return ring_eventfd;
|
|
|
|
}
|