// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#pragma once
|
|
|
|
|
|
|
|
#ifndef _LARGEFILE64_SOURCE
|
|
|
|
#define _LARGEFILE64_SOURCE
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#include <string.h>
|
2019-12-17 01:44:08 +03:00
|
|
|
#include <assert.h>
|
|
|
|
#include <liburing.h>
|
2019-11-05 02:12:04 +03:00
|
|
|
|
2020-10-06 02:35:11 +03:00
|
|
|
#include <string>
|
2019-11-05 02:12:04 +03:00
|
|
|
#include <functional>
|
|
|
|
#include <vector>
|
|
|
|
|
2019-11-17 21:39:30 +03:00
|
|
|
static inline void my_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, const void *addr, unsigned len, off_t offset)
|
|
|
|
{
|
2022-01-31 22:49:40 +03:00
|
|
|
// Prepare a read/write operation without clearing user_data
|
|
|
|
// Very recently, 22 Dec 2021, liburing finally got this change too (8ecd3fd959634df81d66af8b3a69c16202a014e8)
|
|
|
|
// But all versions prior to it (sadly) clear user_data
|
|
|
|
__u64 user_data = sqe->user_data;
|
|
|
|
io_uring_prep_rw(op, sqe, fd, addr, len, offset);
|
|
|
|
sqe->user_data = user_data;
|
2019-11-17 21:39:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_readv(struct io_uring_sqe *sqe, int fd, const struct iovec *iovecs, unsigned nr_vecs, off_t offset)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd, void *buf, unsigned nbytes, off_t offset, int buf_index)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
|
|
|
|
sqe->buf_index = buf_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_writev(struct io_uring_sqe *sqe, int fd, const struct iovec *iovecs, unsigned nr_vecs, off_t offset)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd, const void *buf, unsigned nbytes, off_t offset, int buf_index)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
|
|
|
|
sqe->buf_index = buf_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd, struct msghdr *msg, unsigned flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
|
|
|
|
sqe->msg_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd, const struct msghdr *msg, unsigned flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
|
|
|
|
sqe->msg_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, short poll_mask)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
|
|
|
|
sqe->poll_events = poll_mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_poll_remove(struct io_uring_sqe *sqe, void *user_data)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, 0, user_data, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_fsync(struct io_uring_sqe *sqe, int fd, unsigned fsync_flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
|
|
|
|
sqe->fsync_flags = fsync_flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_nop(struct io_uring_sqe *sqe)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_NOP, sqe, 0, NULL, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_timeout(struct io_uring_sqe *sqe, struct __kernel_timespec *ts, unsigned count, unsigned flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_TIMEOUT, sqe, 0, ts, 1, count);
|
|
|
|
sqe->timeout_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_timeout_remove(struct io_uring_sqe *sqe, __u64 user_data, unsigned flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, 0, (void *)user_data, 0, 0);
|
|
|
|
sqe->timeout_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_accept(struct io_uring_sqe *sqe, int fd, struct sockaddr *addr, socklen_t *addrlen, int flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0, (__u64) addrlen);
|
|
|
|
sqe->accept_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void my_uring_prep_cancel(struct io_uring_sqe *sqe, void *user_data, int flags)
|
|
|
|
{
|
|
|
|
my_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, 0, user_data, 0, 0);
|
|
|
|
sqe->cancel_flags = flags;
|
|
|
|
}
|
|
|
|
|
2019-11-05 02:12:04 +03:00
|
|
|
// Per-SQE completion context; a pointer to one of these is stored in
// sqe->user_data by ring_loop_t::get_sqe().
struct ring_data_t
{
    struct iovec iov; // for single-entry read/write operations
    int res; // NOTE(review): presumably the CQE result code — confirm against ring_loop_t::loop()
    std::function<void(ring_data_t*)> callback; // invoked on completion — TODO confirm in loop()
};
|
|
|
|
|
|
|
|
// A participant in the event loop; its loop() callback is run by
// ring_loop_t (registration via register_consumer()).
struct ring_consumer_t
{
    std::function<void(void)> loop;
};
|
|
|
|
|
|
|
|
class ring_loop_t
|
|
|
|
{
|
2020-04-25 23:11:50 +03:00
|
|
|
std::vector<std::pair<int,std::function<void()>>> get_sqe_queue;
|
2020-03-04 21:00:15 +03:00
|
|
|
std::vector<ring_consumer_t*> consumers;
|
2019-12-17 01:44:08 +03:00
|
|
|
struct ring_data_t *ring_datas;
|
|
|
|
int *free_ring_data;
|
2020-04-25 23:11:50 +03:00
|
|
|
int wait_sqe_id;
|
2019-12-17 01:44:08 +03:00
|
|
|
unsigned free_ring_data_ptr;
|
2019-11-28 02:27:17 +03:00
|
|
|
bool loop_again;
|
2019-11-16 02:12:27 +03:00
|
|
|
struct io_uring ring;
|
2019-12-17 01:44:08 +03:00
|
|
|
public:
|
2019-11-05 02:12:04 +03:00
|
|
|
ring_loop_t(int qd);
|
|
|
|
~ring_loop_t();
|
2020-03-04 21:00:15 +03:00
|
|
|
void register_consumer(ring_consumer_t *consumer);
|
|
|
|
void unregister_consumer(ring_consumer_t *consumer);
|
2019-12-17 01:44:08 +03:00
|
|
|
|
2019-11-17 21:39:30 +03:00
|
|
|
inline struct io_uring_sqe* get_sqe()
|
|
|
|
{
|
2019-12-17 01:44:08 +03:00
|
|
|
if (free_ring_data_ptr == 0)
|
|
|
|
return NULL;
|
2019-11-17 21:39:30 +03:00
|
|
|
struct io_uring_sqe* sqe = io_uring_get_sqe(&ring);
|
|
|
|
if (sqe)
|
2021-02-02 01:26:54 +03:00
|
|
|
{
|
|
|
|
*sqe = { 0 };
|
2019-12-17 01:44:08 +03:00
|
|
|
io_uring_sqe_set_data(sqe, ring_datas + free_ring_data[--free_ring_data_ptr]);
|
2021-02-02 01:26:54 +03:00
|
|
|
}
|
2019-11-17 21:39:30 +03:00
|
|
|
return sqe;
|
|
|
|
}
|
2020-04-25 23:11:50 +03:00
|
|
|
inline int wait_sqe(std::function<void()> cb)
|
|
|
|
{
|
|
|
|
get_sqe_queue.push_back({ wait_sqe_id, cb });
|
|
|
|
return wait_sqe_id++;
|
|
|
|
}
|
|
|
|
inline void cancel_wait_sqe(int wait_id)
|
|
|
|
{
|
|
|
|
for (int i = 0; i < get_sqe_queue.size(); i++)
|
|
|
|
{
|
|
|
|
if (get_sqe_queue[i].first == wait_id)
|
|
|
|
{
|
|
|
|
get_sqe_queue.erase(get_sqe_queue.begin()+i, get_sqe_queue.begin()+i+1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-05 02:43:21 +03:00
|
|
|
inline int submit()
|
|
|
|
{
|
2019-11-16 02:12:27 +03:00
|
|
|
return io_uring_submit(&ring);
|
2019-11-05 02:43:21 +03:00
|
|
|
}
|
2019-11-26 02:18:37 +03:00
|
|
|
inline int wait()
|
|
|
|
{
|
|
|
|
struct io_uring_cqe *cqe;
|
|
|
|
return io_uring_wait_cqe(&ring, &cqe);
|
|
|
|
}
|
2022-01-31 02:17:54 +03:00
|
|
|
int sqes_left();
|
2019-12-17 01:44:08 +03:00
|
|
|
inline unsigned space_left()
|
|
|
|
{
|
|
|
|
return free_ring_data_ptr;
|
|
|
|
}
|
2020-03-04 21:00:15 +03:00
|
|
|
inline bool has_work()
|
2019-12-17 01:44:08 +03:00
|
|
|
{
|
|
|
|
return loop_again;
|
|
|
|
}
|
|
|
|
|
|
|
|
void loop();
|
|
|
|
void wakeup();
|
|
|
|
|
|
|
|
unsigned save();
|
|
|
|
void restore(unsigned sqe_tail);
|
2019-11-05 02:12:04 +03:00
|
|
|
};
|